Compare commits
69 Commits
SHA1:
1a15e433ba, 816121fbcf, e82e97a9d7, 6102f1d66e, 701b2f3bfd, bac89d8301, b3dfb2bc7b, 1fb7f0e6c8, 2241364d00, f63e270c73, ccab367351, 67ddc148a9, 31afdac3af, 1344246287, 7b25873438, d572b4f753, cd69b5a78f, 01ad9b71ba, b64daaf034, c7837bf758, 448e6bb72a, f7d3760af7, 2acb05741e, 8104ee9df2, ab8566df03, 94fa51ffb2, 1c823e4353, f6ce0c102b, a1a47c905d, d3e32f944a, 3bf9f02a9f, 533c767e1d, 84d55dbc8a, 5aafff6310, 840aaf64ff, 7bbb56a328, 394b08b2fe, 842252f9e2, 279f5616d1, 04bb88f74b, 38ed6dbc5f, ec2f8d0c58, 9b51065414, 1a70603b1a, 505335d872, bdc8c65a09, 03fe29b088, 5857efdd70, bdffe7b14e, ae7cde0433, 6293d37e73, 7a3fe5a1df, 2a32c179ba, 0a02bde219, 966aa9ce8c, 6f18d56ca0, c400461835, a6691de1b7, e6934cda02, 4cf7956ff5, 5356455904, 074aded5c0, c7557f761f, 09d6460a13, 40c99e9ef3, 75ff5510d4, 1370d24985, ef50c1957b, 41ea6ef6f5
@ -5,18 +5,18 @@ info:
  repository_url: https://github.com/dosco/super-graph
options:
  commits:
    # filters:
    #   Type:
    #     - feat
    #     - fix
    #     - perf
    #     - refactor
    filters:
      Type:
        - feat
        - fix
        - perf
        - refactor
  commit_groups:
    # title_maps:
    #   feat: Features
    #   fix: Bug Fixes
    #   perf: Performance Improvements
    #   refactor: Code Refactoring
    title_maps:
      feat: Features
      fix: Bug Fixes
      perf: Performance Improvements
      refactor: Code Refactoring
  header:
    pattern: "^((\\w+)\\s.*)$"
    pattern_maps:
8 .deepsource.toml Normal file
@ -0,0 +1,8 @@
version = 1

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_path = "github.com/dosco/super-graph"
6 .gitignore vendored
@ -23,16 +23,20 @@
/tmp/runner-build
/demo/tmp

.idea
*.iml
.vscode
.DS_Store
.swp
.release
main
super-graph
supergraph
*-fuzz.zip
crashers
suppressions
release
.gofuzz
*-fuzz.zip
*.test
.firebase
@ -7,7 +7,7 @@ rules:
  - name: run
    match: \.go$
    ignore: web|examples|docs|_test\.go$
    command: go run cmd/main.go serv
    command: go run main.go serv
  - name: test
    match: _test\.go$
    command: go test -cover {PKG}
570 CHANGELOG.md
@ -1,401 +1,371 @@
|
||||
<a name="unreleased"></a>
|
||||
## [Unreleased]
|
||||
|
||||
### Add
|
||||
- Add config driven custom table relationships
|
||||
- Add support for `websearch_to_tsquery` in PG 11
|
||||
|
||||
### Create
|
||||
- Create CODE_OF_CONDUCT.md
|
||||
<a name="v0.13.22"></a>
|
||||
## [v0.13.22] - 2020-05-01
|
||||
|
||||
### Fix
|
||||
- Fix bug with remote join example
|
||||
- Fix grammar / syntax
|
||||
<a name="v0.13.21"></a>
|
||||
## [v0.13.21] - 2020-04-24
|
||||
|
||||
### Update
|
||||
- Update issue templates
|
||||
- Update CONTRIBUTING.md
|
||||
- Update issue templates
|
||||
- Update feature_request.md
|
||||
<a name="v0.13.20"></a>
|
||||
## [v0.13.20] - 2020-04-24
|
||||
|
||||
<a name="v0.13.19"></a>
|
||||
## [v0.13.19] - 2020-04-23
|
||||
|
||||
<a name="v0.13.18"></a>
|
||||
## [v0.13.18] - 2020-04-23
|
||||
|
||||
<a name="v0.13.17"></a>
|
||||
## [v0.13.17] - 2020-04-22
|
||||
|
||||
<a name="v0.13.16"></a>
|
||||
## [v0.13.16] - 2020-04-21
|
||||
### Features
|
||||
- feat : improve the generated introspection schema and avoid the chirino/graphql api leaking through the core api. ([#53](https://github.com/dosco/super-graph/issues/53))
|
||||
|
||||
|
||||
<a name="v0.13.15"></a>
|
||||
## [v0.13.15] - 2020-04-20
|
||||
|
||||
<a name="v0.13.14"></a>
|
||||
## [v0.13.14] - 2020-04-19
|
||||
|
||||
<a name="v0.13.13"></a>
|
||||
## [v0.13.13] - 2020-04-19
|
||||
|
||||
<a name="v0.13.12"></a>
|
||||
## [v0.13.12] - 2020-04-19
|
||||
|
||||
<a name="v0.13.11"></a>
|
||||
## [v0.13.11] - 2020-04-18
|
||||
|
||||
<a name="v0.13.10"></a>
|
||||
## [v0.13.10] - 2020-04-17
|
||||
|
||||
<a name="v0.13.9"></a>
|
||||
## [v0.13.9] - 2020-04-16
|
||||
|
||||
<a name="v0.13.8"></a>
|
||||
## [v0.13.8] - 2020-04-16
|
||||
|
||||
<a name="v0.13.7"></a>
|
||||
## [v0.13.7] - 2020-04-16
|
||||
|
||||
<a name="v0.13.6"></a>
|
||||
## [v0.13.6] - 2020-04-13
|
||||
|
||||
<a name="v0.13.5"></a>
|
||||
## [v0.13.5] - 2020-04-13
|
||||
|
||||
<a name="v0.13.4"></a>
|
||||
## [v0.13.4] - 2020-04-12
|
||||
|
||||
<a name="v0.13.3"></a>
|
||||
## [v0.13.3] - 2020-04-12
|
||||
|
||||
<a name="v0.13.2"></a>
|
||||
## [v0.13.2] - 2020-04-11
|
||||
|
||||
<a name="v0.13.1"></a>
|
||||
## [v0.13.1] - 2020-04-11
|
||||
|
||||
<a name="v0.13.0"></a>
|
||||
## [v0.13.0] - 2020-04-10
|
||||
|
||||
<a name="v0.12.49"></a>
|
||||
## [v0.12.49] - 2020-04-01
|
||||
|
||||
<a name="v0.12.48"></a>
|
||||
## [v0.12.48] - 2020-03-31
|
||||
|
||||
<a name="v0.12.47"></a>
|
||||
## [v0.12.47] - 2020-03-30
|
||||
|
||||
<a name="v0.12.46"></a>
|
||||
## [v0.12.46] - 2020-03-21
|
||||
|
||||
<a name="v0.12.45"></a>
|
||||
## [v0.12.45] - 2020-03-18
|
||||
|
||||
<a name="v0.12.44"></a>
|
||||
## [v0.12.44] - 2020-03-16
|
||||
|
||||
<a name="v0.12.43"></a>
|
||||
## [v0.12.43] - 2020-03-16
|
||||
|
||||
<a name="v0.12.42"></a>
|
||||
## [v0.12.42] - 2020-03-14
|
||||
|
||||
<a name="v0.12.41"></a>
|
||||
## [v0.12.41] - 2020-03-06
|
||||
|
||||
<a name="v0.12.40"></a>
|
||||
## [v0.12.40] - 2020-03-06
|
||||
|
||||
<a name="v0.12.39"></a>
|
||||
## [v0.12.39] - 2020-03-06
|
||||
|
||||
<a name="v0.12.38"></a>
|
||||
## [v0.12.38] - 2020-03-05
|
||||
|
||||
<a name="v0.12.37"></a>
|
||||
## [v0.12.37] - 2020-03-04
|
||||
|
||||
<a name="v0.12.36"></a>
|
||||
## [v0.12.36] - 2020-03-04
|
||||
|
||||
<a name="v0.12.35"></a>
|
||||
## [v0.12.35] - 2020-03-03
|
||||
|
||||
<a name="v0.12.34"></a>
|
||||
## [v0.12.34] - 2020-03-03
|
||||
|
||||
<a name="v0.12.33"></a>
|
||||
## [v0.12.33] - 2020-02-29
|
||||
|
||||
<a name="v0.12.32"></a>
|
||||
## [v0.12.32] - 2020-02-24
|
||||
### Bug Fixes
|
||||
- fix "Try the demo app" in docs ([#38](https://github.com/dosco/super-graph/issues/38))
|
||||
|
||||
|
||||
<a name="v0.12.31"></a>
|
||||
## [v0.12.31] - 2020-02-23
|
||||
|
||||
<a name="v0.12.30"></a>
|
||||
## [v0.12.30] - 2020-02-23
|
||||
|
||||
<a name="v0.12.29"></a>
|
||||
## [v0.12.29] - 2020-02-21
|
||||
|
||||
<a name="v0.12.28"></a>
|
||||
## [v0.12.28] - 2020-02-20
|
||||
|
||||
<a name="v0.12.27"></a>
|
||||
## [v0.12.27] - 2020-02-19
|
||||
|
||||
<a name="v0.12.26"></a>
|
||||
## [v0.12.26] - 2020-02-11
|
||||
|
||||
<a name="v0.12.25"></a>
|
||||
## [v0.12.25] - 2020-02-10
|
||||
|
||||
<a name="v0.12.24"></a>
|
||||
## [v0.12.24] - 2020-02-03
|
||||
|
||||
<a name="v0.12.23"></a>
|
||||
## [v0.12.23] - 2020-02-02
|
||||
|
||||
<a name="v0.12.22"></a>
|
||||
## [v0.12.22] - 2020-02-01
|
||||
|
||||
<a name="v0.12.21"></a>
|
||||
## [v0.12.21] - 2020-01-31
|
||||
|
||||
<a name="v0.12.20"></a>
|
||||
## [v0.12.20] - 2020-01-28
|
||||
|
||||
<a name="v0.12.19"></a>
|
||||
## [v0.12.19] - 2020-01-26
|
||||
|
||||
<a name="v0.12.18"></a>
|
||||
## [v0.12.18] - 2020-01-20
|
||||
|
||||
<a name="v0.12.17"></a>
|
||||
## [v0.12.17] - 2020-01-20
|
||||
|
||||
<a name="v0.12.16"></a>
|
||||
## [v0.12.16] - 2020-01-19
|
||||
|
||||
<a name="v0.12.15"></a>
|
||||
## [v0.12.15] - 2020-01-17
|
||||
|
||||
<a name="v0.12.14"></a>
|
||||
## [v0.12.14] - 2020-01-17
|
||||
|
||||
<a name="v0.12.13"></a>
|
||||
## [v0.12.13] - 2020-01-16
|
||||
|
||||
<a name="v0.12.12"></a>
|
||||
## [v0.12.12] - 2020-01-15
|
||||
|
||||
<a name="v0.12.11"></a>
|
||||
## [v0.12.11] - 2020-01-14
|
||||
|
||||
<a name="v0.12.10"></a>
|
||||
## [v0.12.10] - 2020-01-14
|
||||
|
||||
<a name="v0.12.9"></a>
|
||||
## [v0.12.9] - 2020-01-14
|
||||
|
||||
<a name="v0.12.8"></a>
|
||||
## [v0.12.8] - 2020-01-13
|
||||
|
||||
<a name="v0.12.7"></a>
|
||||
## [v0.12.7] - 2020-01-11
|
||||
### Pull Requests
|
||||
- Merge pull request [#22](https://github.com/dosco/super-graph/issues/22) from bhaskarmurthy/fix-grammer-syntax
|
||||
|
||||
|
||||
<a name="v0.12.6"></a>
|
||||
## [v0.12.6] - 2019-12-02
|
||||
### Add
|
||||
- Add support for `websearch_to_tsquery` in PG 11
|
||||
|
||||
|
||||
<a name="v0.12.5"></a>
|
||||
## [v0.12.5] - 2019-11-30
|
||||
### Add
|
||||
- Add a guide to the internals of the codebase
|
||||
- Add a CONTRIBUTING.md guide for contributors
|
||||
- Add a CHANGELOG.md
|
||||
- Add issue templates
|
||||
|
||||
### Fix
|
||||
- Fix for missing filters on nested selectors
|
||||
|
||||
### Refactor
|
||||
- Refactor rename `Select.Table` to `Select.Name`
|
||||
|
||||
|
||||
<a name="v0.12.4"></a>
|
||||
## [v0.12.4] - 2019-11-28
|
||||
### Move
|
||||
- Move license from MIT to Apache 2.0. Add Makefile
|
||||
|
||||
|
||||
<a name="v0.12.3"></a>
|
||||
## [v0.12.3] - 2019-11-26
|
||||
### Added
|
||||
- Added support for query names to the allow.list
|
||||
|
||||
|
||||
<a name="v0.12.2"></a>
|
||||
## [v0.12.2] - 2019-11-25
|
||||
### Fix
|
||||
- Fix bug with compiling anon queries
|
||||
|
||||
|
||||
<a name="v0.12.1"></a>
|
||||
## [v0.12.1] - 2019-11-22
|
||||
### Move
|
||||
- Move sql query logging from info to debug
|
||||
|
||||
|
||||
<a name="v0.12.0"></a>
|
||||
## [v0.12.0] - 2019-11-22
|
||||
### Use
|
||||
- Use logger error instead of panic in goja handlers
|
||||
|
||||
|
||||
<a name="v0.11.9"></a>
|
||||
## [v0.11.9] - 2019-11-22
|
||||
### Add
|
||||
- Add a db:reset command only for dev mode
|
||||
|
||||
|
||||
<a name="v0.11.8"></a>
|
||||
## [v0.11.8] - 2019-11-21
|
||||
### Optimize
|
||||
- Optimize db queries limit use of transactions
|
||||
|
||||
|
||||
<a name="v0.11.7"></a>
|
||||
## [v0.11.7] - 2019-11-19
|
||||
### Added
|
||||
- Added support for multi-root queries
|
||||
|
||||
|
||||
<a name="v0.11.6"></a>
|
||||
## [v0.11.6] - 2019-11-15
|
||||
### Fix
|
||||
- Fix issues with JWT auth
|
||||
- Fix bug with migration filename generation
|
||||
- Fix bug with migration file name
|
||||
|
||||
|
||||
<a name="v0.11.5"></a>
|
||||
## [v0.11.5] - 2019-11-10
|
||||
### Fix
|
||||
- Fix bug with migration template name
|
||||
|
||||
|
||||
<a name="v0.11.4"></a>
|
||||
## [v0.11.4] - 2019-11-10
|
||||
### Fix
|
||||
- Fix bug with creating new migrations
|
||||
|
||||
|
||||
<a name="v0.11.3"></a>
|
||||
## [v0.11.3] - 2019-11-09
|
||||
### Fix
|
||||
- Fix macro syntax bug in app templates
|
||||
|
||||
|
||||
<a name="v0.11.2"></a>
|
||||
## [v0.11.2] - 2019-11-07
|
||||
### Fix
|
||||
- Fix bugs and add new production mode
|
||||
|
||||
|
||||
<a name="v0.11.1"></a>
|
||||
## [v0.11.1] - 2019-11-05
|
||||
### Add
|
||||
- Add nested where clause to filter based on related tables
|
||||
|
||||
### Block
|
||||
- Block unauthorized requests when 'anon' role is not defined
|
||||
|
||||
### Update
|
||||
- Update docs and website with new features
|
||||
|
||||
|
||||
<a name="v0.11"></a>
|
||||
## [v0.11] - 2019-11-01
|
||||
### Add
|
||||
- Add config driven presets for insert, update and upsert
|
||||
- Add config driven presets for insert, update and upserta
|
||||
- Add RBAC option to disable functions eg. count
|
||||
- Add fuzz testing to 'serv' for the GQL hash parser
|
||||
- Add fuzz testing to 'jsn' and 'qcode'
|
||||
- Add ability to block queries and mutations by role
|
||||
- Add built in 'anon' and 'user' roles
|
||||
- Add role based access control
|
||||
|
||||
### Allow
|
||||
- Allow config files to inherit from other config files
|
||||
|
||||
### Change
|
||||
- Change config key inherit to inherits
|
||||
|
||||
### Get
|
||||
- Get RBAC working for queries and mutations
|
||||
|
||||
### Optimize
|
||||
- Optimize prepared statement flow for RBAC
|
||||
|
||||
### Preserve
|
||||
- Preserve allow.list ordering on save
|
||||
|
||||
### Update
|
||||
- Update filters section in guide
|
||||
|
||||
### Pull Requests
|
||||
- Merge pull request [#11](https://github.com/dosco/super-graph/issues/11) from dosco/rbac
|
||||
|
||||
|
||||
<a name="v0.10.1"></a>
|
||||
## [v0.10.1] - 2019-10-06
|
||||
### Add
|
||||
- Add ability to set filters per operation / action
|
||||
- Add upsert mutation
|
||||
|
||||
### Pull Requests
|
||||
- Merge pull request [#10](https://github.com/dosco/super-graph/issues/10) from FourSigma/sm-examples-folder
|
||||
|
||||
|
||||
<a name="v0.10"></a>
|
||||
## [v0.10] - 2019-10-04
|
||||
### Fix
|
||||
- Fix return values for bulk mutations and delete
|
||||
- Fix issues with mutation SQL
|
||||
- Fix broken demo app
|
||||
- Fix typo in 'across'
|
||||
|
||||
### Remove
|
||||
- Remove extra link from README
|
||||
|
||||
### Update
|
||||
- Update docs, getting started guide and mutations
|
||||
|
||||
### Pull Requests
|
||||
- Merge pull request [#6](https://github.com/dosco/super-graph/issues/6) from muesli/typo-fixes
|
||||
|
||||
|
||||
<a name="v0.9"></a>
|
||||
## [v0.9] - 2019-10-01
|
||||
### Fix
|
||||
- Fix demo rails app broken build
|
||||
|
||||
|
||||
<a name="v0.8"></a>
|
||||
## [v0.8] - 2019-09-30
|
||||
### Fix
|
||||
- Fix invalid import bug
|
||||
|
||||
### Update
|
||||
- Update documentation site
|
||||
|
||||
|
||||
<a name="v0.7"></a>
|
||||
## [v0.7] - 2019-09-29
|
||||
### Failure
|
||||
- Failure to prepare statements should be a warning
|
||||
|
||||
### Fix
|
||||
- Fix duplicate column bug
|
||||
|
||||
|
||||
<a name="v0.6"></a>
|
||||
## [v0.6] - 2019-09-29
|
||||
### Add
|
||||
- Add database setup commands
|
||||
- Add binary compression back to Dockerfile
|
||||
- Add initialization command to setup new apps
|
||||
- Add migrate command
|
||||
- Add database seeding capability
|
||||
- Add session variable for user id
|
||||
- Add delete mutation
|
||||
- Add update mutation
|
||||
- Add insert mutation with bulk insert
|
||||
- Add GoTO Aug, 19 presentation
|
||||
- Add support for prepared statements
|
||||
- Add end-to-end benchmarking
|
||||
- Add object pooling for parser expressions
|
||||
- Add request / response debugging for remote joins
|
||||
- Add a presentation about GraphQL
|
||||
- Add validation for remote JSON
|
||||
- Add tracing for API stitching
|
||||
- Add REST API stitching
|
||||
- Add SQL query caching
|
||||
- Add support for GraphQL variables
|
||||
- Add fuzz testing to qcode
|
||||
- Add test for Rails Redis cookie store integration
|
||||
- Add an install guide
|
||||
|
||||
### Change
|
||||
- Change fuzz test name to qcode
|
||||
- Change logo from PNG to SVG
|
||||
|
||||
### Enable
- Enable reload on config change
|
||||
|
||||
### Fix
|
||||
- Fix missing config name bug
|
||||
- Fix new app templates
|
||||
- Fix help message for migrate
|
||||
- Fix session variable bug
|
||||
- Fix test failures in `psql` and `serv`
|
||||
- Fix demo docker services startup order
|
||||
- Fix wrong value for false token bug. Reported by [@ThisIsMissEm](https://github.com/ThisIsMissEm)
|
||||
- Fix allow.list file discovery bug
|
||||
- Fix bug with allow list path
|
||||
- Fix wrong value for use_allow_list in dev config
|
||||
- Fix startup bug in demo script
|
||||
- Fix url bug in allow list
|
||||
- Fix bug [#676](https://github.com/dosco/super-graph/issues/676) found by fuzzer
|
||||
- Fix race-condition in remote joins
|
||||
- Fix cookie passing in web ui
|
||||
- Fix bug with passing cookies in web ui
|
||||
- Fix null pointer with invalid argument values
|
||||
- Fix infinite loop bug in lexer
|
||||
- Fix null pointer issue found by fuzz test
|
||||
- Fix issue with fuzzbuzz config
|
||||
- Fix demo to run as memory only
|
||||
- Fix auth documentation
|
||||
- Fix issue with web ui sizing
|
||||
- Fix issue preventing docker-compose deploy
|
||||
- Fix try demo documentation
|
||||
|
||||
### Further
- Further reduce allocations across hot paths
- Further reduce allocations on the compiler hot path
- Further optimize json parsing and editing performance
|
||||
|
||||
### Highlight
|
||||
- Highlight top features better on the site
|
||||
|
||||
### Improve
|
||||
- Improve readability of json parser code
|
||||
- Improve the motivation section in the readme
|
||||
- Improve the demo experience
|
||||
|
||||
### Make
|
||||
- Make remote joins use parallel http requests
|
||||
|
||||
### Merge
|
||||
- Merge branch 'master' into optimize-psql
|
||||
|
||||
### New
|
||||
- New low allocation fast json parsing and editing library
|
||||
|
||||
### Optimize
|
||||
- Optimize lexer and fix bugs
|
||||
- Optimize the sql generator hot path
|
||||
|
||||
### Reduce
|
||||
- Reduce allocations done by the stack
|
||||
- Reduce steps to run the demo
|
||||
- Reduce allocations and improve perf over 50%
|
||||
|
||||
### Remove
|
||||
- Remove unused packages
|
||||
- Remove the 'hello' test app folder
|
||||
- Remove other allocations in psql
|
||||
|
||||
### Use
|
||||
- Use hash's as ids for table relationships
|
||||
|
||||
### Watch
|
||||
- Watch and reload on config changes
|
||||
|
||||
|
||||
<a name="v0.5"></a>
|
||||
## [v0.5] - 2019-04-10
|
||||
### Add
|
||||
- Add support for new Rails 5.2 aes-256-gcm cookies
|
||||
- Add query support for ts_rank and ts_headline
|
||||
- Add full text search support using TSV indexes
|
||||
- Add missing assets folder
|
||||
- Add fetch by ID feature
|
||||
- Add documentation
|
||||
|
||||
### Cleanup
|
||||
- Cleanup and redesign config files
|
||||
|
||||
### Fix
|
||||
- Fix bug with auth config parsing
|
||||
|
||||
### Redesign
|
||||
- Redesign config file architecture
|
||||
|
||||
### Reduce
|
||||
- Reduce realloc of maps and slices
|
||||
|
||||
### Update
|
||||
- Update docs with full-text search information
|
||||
|
||||
|
||||
<a name="v0.4"></a>
|
||||
## [v0.4] - 2019-04-01
|
||||
|
||||
<a name="v0.3"></a>
|
||||
## [v0.3] - 2019-04-01
|
||||
### Add
|
||||
- Add SQL execution timing and tracing
|
||||
- Add support for HAVING with aggregate queries
|
||||
- Add aggregate functions to GQL queries
|
||||
- Add Auth0 JWT support
|
||||
- Add React UI building to the docker build flow
|
||||
- Add compiler profiling
|
||||
- Add benchmarks for GQL to SQL compile
|
||||
- Add tests for gql to sql compile
|
||||
|
||||
### Cleanup
|
||||
- Cleanup Dockerfile
|
||||
|
||||
### Fix
|
||||
- Fix recurring packer issue docker hub builds
|
||||
- Fix issue with asset packer breaking Docker builds
|
||||
- Fix missing git package in Dockerfile
|
||||
- Fix docker ignore values
|
||||
- Fix image build failure on docker hub
|
||||
- Fix build issue in Dockerfile
|
||||
- Fix bugs and document the 'where' clause
|
||||
- Fix perf issue with inflections
|
||||
|
||||
### Optimize
|
||||
- Optimize docker image
|
||||
|
||||
### Pack
|
||||
- Pack web UI with app into a single binary
|
||||
|
||||
### Upgrade
|
||||
- Upgrade web UI packages
|
||||
|
||||
|
||||
<a name="0.3"></a>
|
||||
## 0.3 - 2019-03-24
|
||||
### First
|
||||
- First commit
|
||||
|
||||
### Fix
|
||||
- Fix license to MIT
|
||||
|
||||
|
||||
[Unreleased]: https://github.com/dosco/super-graph/compare/v0.12.6...HEAD
|
||||
[Unreleased]: https://github.com/dosco/super-graph/compare/v0.13.22...HEAD
|
||||
[v0.13.22]: https://github.com/dosco/super-graph/compare/v0.13.21...v0.13.22
|
||||
[v0.13.21]: https://github.com/dosco/super-graph/compare/v0.13.20...v0.13.21
|
||||
[v0.13.20]: https://github.com/dosco/super-graph/compare/v0.13.19...v0.13.20
|
||||
[v0.13.19]: https://github.com/dosco/super-graph/compare/v0.13.18...v0.13.19
|
||||
[v0.13.18]: https://github.com/dosco/super-graph/compare/v0.13.17...v0.13.18
|
||||
[v0.13.17]: https://github.com/dosco/super-graph/compare/v0.13.16...v0.13.17
|
||||
[v0.13.16]: https://github.com/dosco/super-graph/compare/v0.13.15...v0.13.16
|
||||
[v0.13.15]: https://github.com/dosco/super-graph/compare/v0.13.14...v0.13.15
|
||||
[v0.13.14]: https://github.com/dosco/super-graph/compare/v0.13.13...v0.13.14
|
||||
[v0.13.13]: https://github.com/dosco/super-graph/compare/v0.13.12...v0.13.13
|
||||
[v0.13.12]: https://github.com/dosco/super-graph/compare/v0.13.11...v0.13.12
|
||||
[v0.13.11]: https://github.com/dosco/super-graph/compare/v0.13.10...v0.13.11
|
||||
[v0.13.10]: https://github.com/dosco/super-graph/compare/v0.13.9...v0.13.10
|
||||
[v0.13.9]: https://github.com/dosco/super-graph/compare/v0.13.8...v0.13.9
|
||||
[v0.13.8]: https://github.com/dosco/super-graph/compare/v0.13.7...v0.13.8
|
||||
[v0.13.7]: https://github.com/dosco/super-graph/compare/v0.13.6...v0.13.7
|
||||
[v0.13.6]: https://github.com/dosco/super-graph/compare/v0.13.5...v0.13.6
|
||||
[v0.13.5]: https://github.com/dosco/super-graph/compare/v0.13.4...v0.13.5
|
||||
[v0.13.4]: https://github.com/dosco/super-graph/compare/v0.13.3...v0.13.4
|
||||
[v0.13.3]: https://github.com/dosco/super-graph/compare/v0.13.2...v0.13.3
|
||||
[v0.13.2]: https://github.com/dosco/super-graph/compare/v0.13.1...v0.13.2
|
||||
[v0.13.1]: https://github.com/dosco/super-graph/compare/v0.13.0...v0.13.1
|
||||
[v0.13.0]: https://github.com/dosco/super-graph/compare/v0.12.49...v0.13.0
|
||||
[v0.12.49]: https://github.com/dosco/super-graph/compare/v0.12.48...v0.12.49
|
||||
[v0.12.48]: https://github.com/dosco/super-graph/compare/v0.12.47...v0.12.48
|
||||
[v0.12.47]: https://github.com/dosco/super-graph/compare/v0.12.46...v0.12.47
|
||||
[v0.12.46]: https://github.com/dosco/super-graph/compare/v0.12.45...v0.12.46
|
||||
[v0.12.45]: https://github.com/dosco/super-graph/compare/v0.12.44...v0.12.45
|
||||
[v0.12.44]: https://github.com/dosco/super-graph/compare/v0.12.43...v0.12.44
|
||||
[v0.12.43]: https://github.com/dosco/super-graph/compare/v0.12.42...v0.12.43
|
||||
[v0.12.42]: https://github.com/dosco/super-graph/compare/v0.12.41...v0.12.42
|
||||
[v0.12.41]: https://github.com/dosco/super-graph/compare/v0.12.40...v0.12.41
|
||||
[v0.12.40]: https://github.com/dosco/super-graph/compare/v0.12.39...v0.12.40
|
||||
[v0.12.39]: https://github.com/dosco/super-graph/compare/v0.12.38...v0.12.39
|
||||
[v0.12.38]: https://github.com/dosco/super-graph/compare/v0.12.37...v0.12.38
|
||||
[v0.12.37]: https://github.com/dosco/super-graph/compare/v0.12.36...v0.12.37
|
||||
[v0.12.36]: https://github.com/dosco/super-graph/compare/v0.12.35...v0.12.36
|
||||
[v0.12.35]: https://github.com/dosco/super-graph/compare/v0.12.34...v0.12.35
|
||||
[v0.12.34]: https://github.com/dosco/super-graph/compare/v0.12.33...v0.12.34
|
||||
[v0.12.33]: https://github.com/dosco/super-graph/compare/v0.12.32...v0.12.33
|
||||
[v0.12.32]: https://github.com/dosco/super-graph/compare/v0.12.31...v0.12.32
|
||||
[v0.12.31]: https://github.com/dosco/super-graph/compare/v0.12.30...v0.12.31
|
||||
[v0.12.30]: https://github.com/dosco/super-graph/compare/v0.12.29...v0.12.30
|
||||
[v0.12.29]: https://github.com/dosco/super-graph/compare/v0.12.28...v0.12.29
|
||||
[v0.12.28]: https://github.com/dosco/super-graph/compare/v0.12.27...v0.12.28
|
||||
[v0.12.27]: https://github.com/dosco/super-graph/compare/v0.12.26...v0.12.27
|
||||
[v0.12.26]: https://github.com/dosco/super-graph/compare/v0.12.25...v0.12.26
|
||||
[v0.12.25]: https://github.com/dosco/super-graph/compare/v0.12.24...v0.12.25
|
||||
[v0.12.24]: https://github.com/dosco/super-graph/compare/v0.12.23...v0.12.24
|
||||
[v0.12.23]: https://github.com/dosco/super-graph/compare/v0.12.22...v0.12.23
|
||||
[v0.12.22]: https://github.com/dosco/super-graph/compare/v0.12.21...v0.12.22
|
||||
[v0.12.21]: https://github.com/dosco/super-graph/compare/v0.12.20...v0.12.21
|
||||
[v0.12.20]: https://github.com/dosco/super-graph/compare/v0.12.19...v0.12.20
|
||||
[v0.12.19]: https://github.com/dosco/super-graph/compare/v0.12.18...v0.12.19
|
||||
[v0.12.18]: https://github.com/dosco/super-graph/compare/v0.12.17...v0.12.18
|
||||
[v0.12.17]: https://github.com/dosco/super-graph/compare/v0.12.16...v0.12.17
|
||||
[v0.12.16]: https://github.com/dosco/super-graph/compare/v0.12.15...v0.12.16
|
||||
[v0.12.15]: https://github.com/dosco/super-graph/compare/v0.12.14...v0.12.15
|
||||
[v0.12.14]: https://github.com/dosco/super-graph/compare/v0.12.13...v0.12.14
|
||||
[v0.12.13]: https://github.com/dosco/super-graph/compare/v0.12.12...v0.12.13
|
||||
[v0.12.12]: https://github.com/dosco/super-graph/compare/v0.12.11...v0.12.12
|
||||
[v0.12.11]: https://github.com/dosco/super-graph/compare/v0.12.10...v0.12.11
|
||||
[v0.12.10]: https://github.com/dosco/super-graph/compare/v0.12.9...v0.12.10
|
||||
[v0.12.9]: https://github.com/dosco/super-graph/compare/v0.12.8...v0.12.9
|
||||
[v0.12.8]: https://github.com/dosco/super-graph/compare/v0.12.7...v0.12.8
|
||||
[v0.12.7]: https://github.com/dosco/super-graph/compare/v0.12.6...v0.12.7
|
||||
[v0.12.6]: https://github.com/dosco/super-graph/compare/v0.12.5...v0.12.6
|
||||
[v0.12.5]: https://github.com/dosco/super-graph/compare/v0.12.4...v0.12.5
|
||||
[v0.12.4]: https://github.com/dosco/super-graph/compare/v0.12.3...v0.12.4
|
||||
|
18 Dockerfile
@ -1,10 +1,12 @@
|
||||
# stage: 1
|
||||
FROM node:10 as react-build
|
||||
WORKDIR /web
|
||||
COPY /cmd/internal/serv/web/ ./
|
||||
COPY /internal/serv/web/ ./
|
||||
RUN yarn
|
||||
RUN yarn build
|
||||
|
||||
|
||||
|
||||
# stage: 2
|
||||
FROM golang:1.14-alpine as go-build
|
||||
RUN apk update && \
|
||||
@ -22,14 +24,16 @@ RUN chmod 755 /usr/local/bin/sops
|
||||
WORKDIR /app
|
||||
COPY . /app
|
||||
|
||||
RUN mkdir -p /app/cmd/internal/serv/web/build
|
||||
COPY --from=react-build /web/build/ ./cmd/internal/serv/web/build
|
||||
RUN mkdir -p /app/internal/serv/web/build
|
||||
COPY --from=react-build /web/build/ ./internal/serv/web/build
|
||||
|
||||
RUN go mod vendor
|
||||
RUN make build
|
||||
RUN echo "Compressing binary, will take a bit of time..." && \
|
||||
upx --ultra-brute -qq super-graph && \
|
||||
upx -t super-graph
|
||||
# RUN echo "Compressing binary, will take a bit of time..." && \
|
||||
# upx --ultra-brute -qq super-graph && \
|
||||
# upx -t super-graph
|
||||
|
||||
|
||||
|
||||
# stage: 3
|
||||
FROM alpine:latest
|
||||
@ -41,7 +45,7 @@ RUN mkdir -p /config
|
||||
COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=go-build /app/config/* /config/
|
||||
COPY --from=go-build /app/super-graph .
|
||||
COPY --from=go-build /app/cmd/scripts/start.sh .
|
||||
COPY --from=go-build /app/internal/scripts/start.sh .
|
||||
COPY --from=go-build /usr/local/bin/sops .
|
||||
|
||||
RUN chmod +x /super-graph
|
||||
|
25 Makefile
@ -12,30 +12,30 @@ endif
|
||||
export GO111MODULE := on
|
||||
|
||||
# Build-time Go variables
|
||||
version = github.com/dosco/super-graph/serv.version
|
||||
gitBranch = github.com/dosco/super-graph/serv.gitBranch
|
||||
lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
|
||||
lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
|
||||
version = github.com/dosco/super-graph/internal/serv.version
|
||||
gitBranch = github.com/dosco/super-graph/internal/serv.gitBranch
|
||||
lastCommitSHA = github.com/dosco/super-graph/internal/serv.lastCommitSHA
|
||||
lastCommitTime = github.com/dosco/super-graph/internal/serv.lastCommitTime
|
||||
|
||||
BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'
|
||||
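These `-X` flags only take effect if the named package declares matching package-level string variables. A minimal sketch of what `github.com/dosco/super-graph/internal/serv` presumably declares (the file itself is not part of this diff; the file name below is hypothetical, the variable names are taken from the Makefile):

```golang
// internal/serv/version.go (hypothetical file name)
package serv

// Build metadata, left empty in source and injected by the linker, e.g.:
//   go build -ldflags '-X github.com/dosco/super-graph/internal/serv.version=v0.13.22'
var (
	version        string // from $(BUILD_VERSION)
	gitBranch      string // from $(BUILD_BRANCH)
	lastCommitSHA  string // from $(BUILD)
	lastCommitTime string // from $(BUILD_DATE)
)
```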
|
||||
.PHONY: all build gen clean test run lint changelog release version help $(PLATFORMS)
|
||||
|
||||
test:
|
||||
@go test -v ./...
|
||||
@go test -v -short -race ./...
|
||||
|
||||
BIN_DIR := $(GOPATH)/bin
|
||||
GORICE := $(BIN_DIR)/rice
|
||||
GOLANGCILINT := $(BIN_DIR)/golangci-lint
|
||||
GITCHGLOG := $(BIN_DIR)/git-chglog
|
||||
WEB_BUILD_DIR := ./cmd/internal/serv/web/build/manifest.json
|
||||
WEB_BUILD_DIR := ./internal/serv/web/build/manifest.json
|
||||
|
||||
$(GORICE):
|
||||
@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice
|
||||
|
||||
$(WEB_BUILD_DIR):
|
||||
@echo "First install Yarn and create a build of the web UI then re-run make install"
|
||||
@echo "Run this command: yarn --cwd cmd/internal/serv/web/ build"
|
||||
@echo "Run this command: yarn --cwd internal/serv/web/ build"
|
||||
@exit 1
|
||||
|
||||
$(GITCHGLOG):
|
||||
@ -45,7 +45,7 @@ changelog: $(GITCHGLOG)
|
||||
@git-chglog $(ARGS)
|
||||
|
||||
$(GOLANGCILINT):
|
||||
@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOPATH)/bin v1.21.0
|
||||
@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOPATH)/bin v1.25.1
|
||||
|
||||
lint: $(GOLANGCILINT)
|
||||
@golangci-lint run ./... --skip-dirs-use-default
|
||||
@ -57,7 +57,7 @@ os = $(word 1, $@)
|
||||
|
||||
$(PLATFORMS): lint test
|
||||
@mkdir -p release
|
||||
@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 cmd/main.go
|
||||
@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 main.go
|
||||
|
||||
release: windows linux darwin
|
||||
|
||||
@ -69,7 +69,7 @@ gen: $(GORICE) $(WEB_BUILD_DIR)
|
||||
@go generate ./...
|
||||
|
||||
$(BINARY): clean
|
||||
@go build $(BUILD_FLAGS) -o $(BINARY) cmd/main.go
|
||||
@go build $(BUILD_FLAGS) -o $(BINARY) main.go
|
||||
|
||||
clean:
|
||||
@rm -f $(BINARY)
|
||||
@ -77,11 +77,10 @@ clean:
|
||||
run: clean
|
||||
@go run $(BUILD_FLAGS) main.go $(ARGS)
|
||||
|
||||
install:
|
||||
@echo $(GOPATH)
|
||||
install: clean build
|
||||
@echo "Commit Hash: `git rev-parse HEAD`"
|
||||
@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
|
||||
@go install $(BUILD_FLAGS) cmd
|
||||
@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
|
||||
@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`
|
||||
|
||||
uninstall: clean
|
||||
|
100 README.md
@ -1,28 +1,71 @@
|
||||
<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->
|
||||
<img src="docs/website/static/img/super-graph-logo.svg" width="80" />
|
||||
|
||||
<img src="docs/.vuepress/public/super-graph.png" width="250" />
|
||||
# Super Graph - Fetch data without code!
|
||||
|
||||
### Build web products faster. Secure high performance GraphQL
|
||||
|
||||

|
||||

|
||||

|
||||
[](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
|
||||

|
||||

|
||||
[](https://discord.gg/6pSWCTZ)
|
||||
|
||||
Super Graph gives you a high performance GraphQL API without you having to write any code. GraphQL is automagically compiled into an efficient SQL query. Use it either as a library or a standalone service.
|
||||
|
||||
## What is Super Graph
|
||||
## Using it as a service
|
||||
|
||||
Is designed to 100x your developer productivity. Super Graph will instantly and without you writing code provide you a high performance and secure GraphQL API for Postgres DB. GraphQL queries are translated into a single fast SQL query. No more writing API code as you develop
|
||||
your web frontend just make the query you need and Super Graph will do the rest.
|
||||
```console
|
||||
go get github.com/dosco/super-graph
|
||||
super-graph new <app_name>
|
||||
```
|
||||
|
||||
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.
|
||||
## Using it in your own code
|
||||
|
||||

|
||||
```console
|
||||
go get github.com/dosco/super-graph/core
|
||||
```
|
||||
|
||||
```golang
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	query := `
	query {
		posts {
			id
			title
		}
	}`

	ctx := context.WithValue(context.Background(), core.UserIDKey, 1)

	res, err := sg.GraphQL(ctx, query, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
```

## The story of Super Graph?

After working on several products through my career I find that we spend way too much time on building API backends. Most APIs also require constant updating, this costs real time and money.
|
||||
|
||||
## About Super Graph
|
||||
|
||||
After working on several products through my career I found that we spend way too much time on building API backends. Most APIs also need constant updating, and this costs time and money.
|
||||
|
||||
It's always the same thing, figure out what the UI needs then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.
|
||||
|
||||
@ -30,44 +73,35 @@ I didn't want to write this code anymore, I wanted the computer to do it. Enter
|
||||
|
||||
Having worked with compilers before I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL.
|
||||
|
||||
This compiler is what sits at the heart of Super Graph with layers of useful functionality around it like authentication, remote joins, rails integration, database migrations and everything else needed for you to build production ready apps with it.
|
||||
This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, rails integration, database migrations, and everything else needed for you to build production-ready apps with it.
|
||||
|
||||
## Features
|
||||
|
||||
- Complex nested queries and mutations
|
||||
- Auto learns database tables and relationships
|
||||
- Role and Attribute based access control
|
||||
- Full text search and aggregations
|
||||
- Role and Attribute-based access control
|
||||
- Opaque cursor-based efficient pagination
|
||||
- Full-text search and aggregations
|
||||
- JWT tokens supported (Auth0, etc)
|
||||
- Join database queries with remote REST APIs
|
||||
- Also works with existing Ruby-On-Rails apps
|
||||
- Rails authentication supported (Redis, Memcache, Cookie)
|
||||
- A simple config file
|
||||
- High performance GO codebase
|
||||
- High performance Go codebase
|
||||
- Tiny docker image and low memory requirements
|
||||
- Fuzz tested for security
|
||||
- Database migrations tool
|
||||
- Database seeding tool
|
||||
- Works with Postgres and YugabyteDB
|
||||
|
||||
## Get started
|
||||
|
||||
```
|
||||
git clone https://github.com/dosco/super-graph
|
||||
cd ./super-graph
|
||||
make install
|
||||
|
||||
super-graph new <app_name>
|
||||
```
|
||||
- Works with Postgres and Yugabyte DB
|
||||
- OpenCensus Support: Zipkin, Prometheus, X-Ray, Stackdriver
|
||||
|
||||
## Documentation
|
||||
|
||||
[supergraph.dev](https://supergraph.dev)
|
||||
|
||||
## Contact me
|
||||
## Reach out
|
||||
|
||||
I'm happy to help you deploy Super Graph so feel free to reach out over
|
||||
Twitter or Discord.
|
||||
We're happy to help you leverage Super Graph; reach out if you have questions
|
||||
|
||||
[twitter/dosco](https://twitter.com/dosco)
|
||||
|
||||
@ -78,5 +112,3 @@ Twitter or Discord.
|
||||
[Apache Public License 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
Copyright (c) 2019-present Vikram Rangnekar
|
||||
|
||||
|
||||
|
@ -1,7 +0,0 @@
|
||||
package serv
|
||||
|
||||
// func (c *coreContext) handleReq(w io.Writer, req *http.Request) error {
|
||||
|
||||
// return nil
|
||||
|
||||
// }
|
@ -1,124 +0,0 @@
|
||||
package serv
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/cmd/internal/serv/internal/auth"
|
||||
"github.com/dosco/super-graph/core"
|
||||
"github.com/rs/cors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
maxReadBytes = 100000 // 100Kb
|
||||
introspectionQuery = "IntrospectionQuery"
|
||||
)
|
||||
|
||||
var (
|
||||
errUnauthorized = errors.New("not authorized")
|
||||
)
|
||||
|
||||
type gqlReq struct {
|
||||
OpName string `json:"operationName"`
|
||||
Query string `json:"query"`
|
||||
Vars json.RawMessage `json:"variables"`
|
||||
}
|
||||
|
||||
type errorResp struct {
|
||||
Error error `json:"error"`
|
||||
}
|
||||
|
||||
func apiV1Handler() http.Handler {
|
||||
h, err := auth.WithAuth(http.HandlerFunc(apiV1), &conf.Auth)
|
||||
if err != nil {
|
||||
log.Fatalf("ERR %s", err)
|
||||
}
|
||||
|
||||
if len(conf.AllowedOrigins) != 0 {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: conf.AllowedOrigins,
|
||||
AllowCredentials: true,
|
||||
Debug: conf.DebugCORS,
|
||||
})
|
||||
h = c.Handler(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func apiV1(w http.ResponseWriter, r *http.Request) {
|
||||
ct := r.Context()
|
||||
|
||||
//nolint: errcheck
|
||||
if conf.AuthFailBlock && !auth.IsAuth(ct) {
|
||||
renderErr(w, errUnauthorized, nil)
|
||||
return
|
||||
}
|
||||
|
||||
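	// Read at most maxReadBytes (100KB) of the request body so an oversized
	// or malicious payload cannot exhaust server memory.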
b, err := ioutil.ReadAll(io.LimitReader(r.Body, maxReadBytes))
|
||||
if err != nil {
|
||||
renderErr(w, err, nil)
|
||||
return
|
||||
}
|
||||
defer r.Body.Close()
|
||||
|
||||
req := gqlReq{}
|
||||
|
||||
err = json.Unmarshal(b, &req)
|
||||
if err != nil {
|
||||
renderErr(w, err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.EqualFold(req.OpName, introspectionQuery) {
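		// GraphQL IDEs (including the bundled web UI) first send a query named
		// "IntrospectionQuery"; answer it with the canned schema from introspect()
		// instead of compiling it to SQL.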
|
||||
introspect(w)
|
||||
return
|
||||
}
|
||||
|
||||
res, err := sg.GraphQL(ct, req.Query, req.Vars)
|
||||
|
||||
if logLevel >= LogLevelDebug {
|
||||
log.Printf("DBG query:\n%s\nsql:\n%s", req.Query, res.SQL())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
renderErr(w, err, res)
|
||||
return
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(res)
|
||||
|
||||
if logLevel >= LogLevelInfo {
|
||||
zlog.Info("success",
|
||||
zap.String("op", res.Operation()),
|
||||
zap.String("name", res.QueryName()),
|
||||
zap.String("role", res.Role()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
//nolint: errcheck
|
||||
func renderErr(w http.ResponseWriter, err error, res *core.Result) {
|
||||
if err == errUnauthorized {
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(&errorResp{err})
|
||||
|
||||
if logLevel >= LogLevelError {
|
||||
if res != nil {
|
||||
zlog.Error(err.Error(),
|
||||
zap.String("op", res.Operation()),
|
||||
zap.String("name", res.QueryName()),
|
||||
zap.String("role", res.Role()),
|
||||
)
|
||||
} else {
|
||||
zlog.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
@ -1,153 +0,0 @@
|
||||
package serv
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
_ "github.com/jackc/pgx/v4/stdlib"
|
||||
)
|
||||
|
||||
func initConf() (*Config, error) {
|
||||
c, err := ReadInConfig(path.Join(confPath, GetConfigName()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch c.LogLevel {
|
||||
case "debug":
|
||||
logLevel = LogLevelDebug
|
||||
case "error":
|
||||
logLevel = LogLevelError
|
||||
case "warn":
|
||||
logLevel = LogLevelWarn
|
||||
case "info":
|
||||
logLevel = LogLevelInfo
|
||||
default:
|
||||
logLevel = LogLevelNone
|
||||
}
|
||||
|
||||
// Auths: validate and sanitize
|
||||
am := make(map[string]struct{})
|
||||
|
||||
for i := 0; i < len(c.Auths); i++ {
|
||||
a := &c.Auths[i]
|
||||
a.Name = sanitize(a.Name)
|
||||
|
||||
if _, ok := am[a.Name]; ok {
|
||||
c.Auths = append(c.Auths[:i], c.Auths[i+1:]...)
|
||||
log.Printf("WRN duplicate auth found: %s", a.Name)
|
||||
}
|
||||
am[a.Name] = struct{}{}
|
||||
}
|
||||
|
||||
// Actions: validate and sanitize
|
||||
axm := make(map[string]struct{})
|
||||
|
||||
for i := 0; i < len(c.Actions); i++ {
|
||||
a := &c.Actions[i]
|
||||
a.Name = sanitize(a.Name)
|
||||
a.AuthName = sanitize(a.AuthName)
|
||||
|
||||
if _, ok := axm[a.Name]; ok {
|
||||
c.Actions = append(c.Actions[:i], c.Actions[i+1:]...)
|
||||
log.Printf("WRN duplicate action found: %s", a.Name)
|
||||
}
|
||||
|
||||
if _, ok := am[a.AuthName]; !ok {
|
||||
c.Actions = append(c.Actions[:i], c.Actions[i+1:]...)
|
||||
log.Printf("WRN invalid auth_name '%s' for auth: %s", a.AuthName, a.Name)
|
||||
}
|
||||
axm[a.Name] = struct{}{}
|
||||
}
|
||||
|
||||
var anonFound bool
|
||||
|
||||
for _, r := range c.Roles {
|
||||
if sanitize(r.Name) == "anon" {
|
||||
anonFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if !anonFound {
|
||||
log.Printf("WRN unauthenticated requests will be blocked. no role 'anon' defined")
|
||||
c.AuthFailBlock = false
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func initDB(c *Config) (*sql.DB, error) {
|
||||
var db *sql.DB
|
||||
var err error
|
||||
|
||||
cs := fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
|
||||
c.DB.User, c.DB.Password,
|
||||
c.DB.Host, c.DB.Port, c.DB.DBName)
|
||||
|
||||
for i := 1; i < 10; i++ {
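		// Retry up to 9 times, sleeping i*100ms between attempts, to tolerate a
		// Postgres instance that is still starting up (for example under docker-compose).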
|
||||
db, err = sql.Open("pgx", cs)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Duration(i*100) * time.Millisecond)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db, nil
|
||||
|
||||
// config, _ := pgxpool.ParseConfig("")
|
||||
// config.ConnConfig.Host = c.DB.Host
|
||||
// config.ConnConfig.Port = c.DB.Port
|
||||
// config.ConnConfig.Database = c.DB.DBName
|
||||
// config.ConnConfig.User = c.DB.User
|
||||
// config.ConnConfig.Password = c.DB.Password
|
||||
// config.ConnConfig.RuntimeParams = map[string]string{
|
||||
// "application_name": c.AppName,
|
||||
// "search_path": c.DB.Schema,
|
||||
// }
|
||||
|
||||
// switch c.LogLevel {
|
||||
// case "debug":
|
||||
// config.ConnConfig.LogLevel = pgx.LogLevelDebug
|
||||
// case "info":
|
||||
// config.ConnConfig.LogLevel = pgx.LogLevelInfo
|
||||
// case "warn":
|
||||
// config.ConnConfig.LogLevel = pgx.LogLevelWarn
|
||||
// case "error":
|
||||
// config.ConnConfig.LogLevel = pgx.LogLevelError
|
||||
// default:
|
||||
// config.ConnConfig.LogLevel = pgx.LogLevelNone
|
||||
// }
|
||||
|
||||
// config.ConnConfig.Logger = NewSQLLogger(logger)
|
||||
|
||||
// // if c.DB.MaxRetries != 0 {
|
||||
// // opt.MaxRetries = c.DB.MaxRetries
|
||||
// // }
|
||||
|
||||
// if c.DB.PoolSize != 0 {
|
||||
// config.MaxConns = conf.DB.PoolSize
|
||||
// }
|
||||
|
||||
// var db *pgxpool.Pool
|
||||
// var err error
|
||||
|
||||
// for i := 1; i < 10; i++ {
|
||||
// db, err = pgxpool.ConnectConfig(context.Background(), config)
|
||||
// if err == nil {
|
||||
// break
|
||||
// }
|
||||
// time.Sleep(time.Duration(i*100) * time.Millisecond)
|
||||
// }
|
||||
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// return db, nil
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
package serv
|
||||
|
||||
import "net/http"
|
||||
|
||||
//nolint: errcheck
|
||||
func introspect(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write([]byte(`{
|
||||
"data": {
|
||||
"__schema": {
|
||||
"queryType": {
|
||||
"name": "Query"
|
||||
},
|
||||
"mutationType": null,
|
||||
"subscriptionType": null
|
||||
}
|
||||
},
|
||||
"extensions":{
|
||||
"tracing":{
|
||||
"version":1,
|
||||
"startTime":"2019-06-04T19:53:31.093Z",
|
||||
"endTime":"2019-06-04T19:53:31.108Z",
|
||||
"duration":15219720,
|
||||
"execution": {
|
||||
"resolvers": [{
|
||||
"path": ["__schema"],
|
||||
"parentType": "Query",
|
||||
"fieldName": "__schema",
|
||||
"returnType": "__Schema!",
|
||||
"startOffset": 50950,
|
||||
"duration": 17187
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
}`))
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
version: '3.4'
|
||||
services:
|
||||
# Postgres DB
|
||||
db:
|
||||
image: postgres:12
|
||||
environment:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
ports:
|
||||
- "5432:5432"
|
||||
|
||||
# Yugabyte DB
|
||||
# yb-master:
|
||||
# image: yugabytedb/yugabyte:latest
|
||||
# container_name: yb-master-n1
|
||||
# command: [ "/home/yugabyte/bin/yb-master",
|
||||
# "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
|
||||
# "--master_addresses=yb-master-n1:7100",
|
||||
# "--replication_factor=1",
|
||||
# "--enable_ysql=true"]
|
||||
# ports:
|
||||
# - "7000:7000"
|
||||
# environment:
|
||||
# SERVICE_7000_NAME: yb-master
|
||||
|
||||
# db:
|
||||
# image: yugabytedb/yugabyte:latest
|
||||
# container_name: yb-tserver-n1
|
||||
# command: [ "/home/yugabyte/bin/yb-tserver",
|
||||
# "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
|
||||
# "--start_pgsql_proxy",
|
||||
# "--tserver_master_addrs=yb-master-n1:7100"]
|
||||
# ports:
|
||||
# - "9042:9042"
|
||||
# - "6379:6379"
|
||||
# - "5433:5433"
|
||||
# - "9000:9000"
|
||||
# environment:
|
||||
# SERVICE_5433_NAME: ysql
|
||||
# SERVICE_9042_NAME: ycql
|
||||
# SERVICE_6379_NAME: yedis
|
||||
# SERVICE_9000_NAME: yb-tserver
|
||||
# depends_on:
|
||||
# - yb-master
|
||||
|
||||
{% app_name_slug %}_api:
|
||||
image: dosco/super-graph:latest
|
||||
environment:
|
||||
GO_ENV: "development"
|
||||
# Uncomment below for Yugabyte DB
|
||||
# SG_DATABASE_PORT: 5433
|
||||
# SG_DATABASE_USER: yugabyte
|
||||
# SG_DATABASE_PASSWORD: yugabyte
|
||||
volumes:
|
||||
- ./config:/config
|
||||
ports:
|
||||
- "8080:8080"
|
||||
depends_on:
|
||||
- db
|
@ -1,30 +0,0 @@
|
||||
{
|
||||
"files": {
|
||||
"main.css": "/static/css/main.c6b5c55c.chunk.css",
|
||||
"main.js": "/static/js/main.04d74040.chunk.js",
|
||||
"main.js.map": "/static/js/main.04d74040.chunk.js.map",
|
||||
"runtime-main.js": "/static/js/runtime-main.4aea9da3.js",
|
||||
"runtime-main.js.map": "/static/js/runtime-main.4aea9da3.js.map",
|
||||
"static/js/2.03370bd3.chunk.js": "/static/js/2.03370bd3.chunk.js",
|
||||
"static/js/2.03370bd3.chunk.js.map": "/static/js/2.03370bd3.chunk.js.map",
|
||||
"index.html": "/index.html",
|
||||
"precache-manifest.e33bc3c7c6774d7032c490820c96901d.js": "/precache-manifest.e33bc3c7c6774d7032c490820c96901d.js",
|
||||
"service-worker.js": "/service-worker.js",
|
||||
"static/css/main.c6b5c55c.chunk.css.map": "/static/css/main.c6b5c55c.chunk.css.map",
|
||||
"static/media/GraphQLLanguageService.js.flow": "/static/media/GraphQLLanguageService.js.5ab204b9.flow",
|
||||
"static/media/autocompleteUtils.js.flow": "/static/media/autocompleteUtils.js.4ce7ba19.flow",
|
||||
"static/media/getAutocompleteSuggestions.js.flow": "/static/media/getAutocompleteSuggestions.js.7f98f032.flow",
|
||||
"static/media/getDefinition.js.flow": "/static/media/getDefinition.js.4dbec62f.flow",
|
||||
"static/media/getDiagnostics.js.flow": "/static/media/getDiagnostics.js.65b0979a.flow",
|
||||
"static/media/getHoverInformation.js.flow": "/static/media/getHoverInformation.js.d9411837.flow",
|
||||
"static/media/getOutline.js.flow": "/static/media/getOutline.js.c04e3998.flow",
|
||||
"static/media/index.js.flow": "/static/media/index.js.02c24280.flow",
|
||||
"static/media/logo.png": "/static/media/logo.57ee3b60.png"
|
||||
},
|
||||
"entrypoints": [
|
||||
"static/js/runtime-main.4aea9da3.js",
|
||||
"static/js/2.03370bd3.chunk.js",
|
||||
"static/css/main.c6b5c55c.chunk.css",
|
||||
"static/js/main.04d74040.chunk.js"
|
||||
]
|
||||
}
|
@ -1 +0,0 @@
|
||||
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="shortcut icon" href="/favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,shrink-to-fit=no"/><meta name="theme-color" content="#000000"/><link rel="manifest" href="/manifest.json"/><link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,600,700|Source+Code+Pro:400,700" rel="stylesheet"><title>Super Graph - GraphQL API for Rails</title><link href="/static/css/main.c6b5c55c.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(i){function e(e){for(var r,t,n=e[0],o=e[1],u=e[2],l=0,f=[];l<n.length;l++)t=n[l],Object.prototype.hasOwnProperty.call(p,t)&&p[t]&&f.push(p[t][0]),p[t]=0;for(r in o)Object.prototype.hasOwnProperty.call(o,r)&&(i[r]=o[r]);for(s&&s(e);f.length;)f.shift()();return c.push.apply(c,u||[]),a()}function a(){for(var e,r=0;r<c.length;r++){for(var t=c[r],n=!0,o=1;o<t.length;o++){var u=t[o];0!==p[u]&&(n=!1)}n&&(c.splice(r--,1),e=l(l.s=t[0]))}return e}var t={},p={1:0},c=[];function l(e){if(t[e])return t[e].exports;var r=t[e]={i:e,l:!1,exports:{}};return i[e].call(r.exports,r,r.exports,l),r.l=!0,r.exports}l.m=i,l.c=t,l.d=function(e,r,t){l.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},l.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},l.t=function(r,e){if(1&e&&(r=l(r)),8&e)return r;if(4&e&&"object"==typeof r&&r&&r.__esModule)return r;var t=Object.create(null);if(l.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:r}),2&e&&"string"!=typeof r)for(var n in r)l.d(t,n,function(e){return r[e]}.bind(null,n));return t},l.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return l.d(r,"a",r),r},l.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},l.p="/";var r=this.webpackJsonpweb=this.webpackJsonpweb||[],n=r.push.bind(r);r.push=e,r=r.slice();for(var o=0;o<r.length;o++)e(r[o]);var s=n;a()}([])</script><script src="/static/js/2.03370bd3.chunk.js"></script><script src="/static/js/main.04d74040.chunk.js"></script></body></html>
|
@ -1,58 +0,0 @@
|
||||
self.__precacheManifest = (self.__precacheManifest || []).concat([
|
||||
{
|
||||
"revision": "ecdae64182d05c64e7f7f200ed03a4ed",
|
||||
"url": "/index.html"
|
||||
},
|
||||
{
|
||||
"revision": "6e9467dc213a3e2b84ea",
|
||||
"url": "/static/css/main.c6b5c55c.chunk.css"
|
||||
},
|
||||
{
|
||||
"revision": "c156a125990ddf5dcc51",
|
||||
"url": "/static/js/2.03370bd3.chunk.js"
|
||||
},
|
||||
{
|
||||
"revision": "6e9467dc213a3e2b84ea",
|
||||
"url": "/static/js/main.04d74040.chunk.js"
|
||||
},
|
||||
{
|
||||
"revision": "427262b6771d3f49a7c5",
|
||||
"url": "/static/js/runtime-main.4aea9da3.js"
|
||||
},
|
||||
{
|
||||
"revision": "5ab204b9b95c06640dbefae9a65b1db2",
|
||||
"url": "/static/media/GraphQLLanguageService.js.5ab204b9.flow"
|
||||
},
|
||||
{
|
||||
"revision": "4ce7ba191f7ebee4426768f246b2f0e0",
|
||||
"url": "/static/media/autocompleteUtils.js.4ce7ba19.flow"
|
||||
},
|
||||
{
|
||||
"revision": "7f98f032085704c8943ec2d1925c7c84",
|
||||
"url": "/static/media/getAutocompleteSuggestions.js.7f98f032.flow"
|
||||
},
|
||||
{
|
||||
"revision": "4dbec62f1d8e8417afb9cbd19f1268c3",
|
||||
"url": "/static/media/getDefinition.js.4dbec62f.flow"
|
||||
},
|
||||
{
|
||||
"revision": "65b0979ac23feca49e4411883fd8eaab",
|
||||
"url": "/static/media/getDiagnostics.js.65b0979a.flow"
|
||||
},
|
||||
{
|
||||
"revision": "d94118379d362fc161aa1246bcc14d43",
|
||||
"url": "/static/media/getHoverInformation.js.d9411837.flow"
|
||||
},
|
||||
{
|
||||
"revision": "c04e3998712b37a96f0bfd283fa06b52",
|
||||
"url": "/static/media/getOutline.js.c04e3998.flow"
|
||||
},
|
||||
{
|
||||
"revision": "02c24280c5e4a7eb3c6cfcb079a8f1e3",
|
||||
"url": "/static/media/index.js.02c24280.flow"
|
||||
},
|
||||
{
|
||||
"revision": "57ee3b6084cb9d3c754cc12d25a98035",
|
||||
"url": "/static/media/logo.57ee3b60.png"
|
||||
}
|
||||
]);
|
@ -1,2 +0,0 @@
|
||||
body{margin:0;padding:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;background-color:#0f202d}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.playground>div:nth-child(2){height:calc(100vh - 131px)}
|
||||
/*# sourceMappingURL=main.c6b5c55c.chunk.css.map */
|
File diff suppressed because one or more lines are too long
@ -1,2 +0,0 @@
|
||||
(this.webpackJsonpweb=this.webpackJsonpweb||[]).push([[0],{163:function(e,t,n){var r={".":61,"./":61,"./GraphQLLanguageService":117,"./GraphQLLanguageService.js":117,"./GraphQLLanguageService.js.flow":315,"./autocompleteUtils":91,"./autocompleteUtils.js":91,"./autocompleteUtils.js.flow":316,"./getAutocompleteSuggestions":77,"./getAutocompleteSuggestions.js":77,"./getAutocompleteSuggestions.js.flow":317,"./getDefinition":92,"./getDefinition.js":92,"./getDefinition.js.flow":318,"./getDiagnostics":94,"./getDiagnostics.js":94,"./getDiagnostics.js.flow":319,"./getHoverInformation":95,"./getHoverInformation.js":95,"./getHoverInformation.js.flow":320,"./getOutline":116,"./getOutline.js":116,"./getOutline.js.flow":321,"./index":61,"./index.js":61,"./index.js.flow":322};function o(e){var t=a(e);return n(t)}function a(e){if(!n.o(r,e)){var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}return r[e]}o.keys=function(){return Object.keys(r)},o.resolve=a,e.exports=o,o.id=163},190:function(e,t,n){"use strict";(function(e){var r=n(100),o=n(101),a=n(201),i=n(191),s=n(202),l=n(5),c=n.n(l),u=n(20),g=n(130),f=(n(441),window.fetch);window.fetch=function(){return arguments[1].credentials="include",Promise.resolve(f.apply(e,arguments))};var p=function(e){function t(){return Object(r.a)(this,t),Object(a.a)(this,Object(i.a)(t).apply(this,arguments))}return Object(s.a)(t,e),Object(o.a)(t,[{key:"render",value:function(){return c.a.createElement("div",null,c.a.createElement("header",{style:{background:"#09141b",color:"#03a9f4",letterSpacing:"0.15rem",height:"65px",display:"flex",alignItems:"center"}},c.a.createElement("h3",{style:{textDecoration:"none",margin:"0px",fontSize:"18px"}},c.a.createElement("span",{style:{textTransform:"uppercase",marginLeft:"20px",paddingRight:"10px",borderRight:"1px solid #fff"}},"Super Graph"),c.a.createElement("span",{style:{fontSize:"16px",marginLeft:"10px",color:"#fff"}},"Instant GraphQL"))),c.a.createElement(u.Provider,{store:g.store},c.a.createElement(g.Playground,{endpoint:"/api/v1/graphql",settings:"{ 'schema.polling.enable': false, 'request.credentials': 'include', 'general.betaUpdates': true, 'editor.reuseHeaders': true, 'editor.theme': 'dark' }"})))}}]),t}(l.Component);t.a=p}).call(this,n(32))},205:function(e,t,n){e.exports=n(206)},206:function(e,t,n){"use strict";n.r(t);var r=n(5),o=n.n(r),a=n(52),i=n.n(a),s=n(190);i.a.render(o.a.createElement(s.a,null),document.getElementById("root"))},441:function(e,t,n){}},[[205,1,2]]]);
|
||||
//# sourceMappingURL=main.04d74040.chunk.js.map
|
File diff suppressed because one or more lines are too long
@ -1,328 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
import type {
|
||||
DocumentNode,
|
||||
FragmentSpreadNode,
|
||||
FragmentDefinitionNode,
|
||||
OperationDefinitionNode,
|
||||
TypeDefinitionNode,
|
||||
NamedTypeNode,
|
||||
} from 'graphql';
|
||||
import type {
|
||||
CompletionItem,
|
||||
DefinitionQueryResult,
|
||||
Diagnostic,
|
||||
GraphQLCache,
|
||||
GraphQLConfig,
|
||||
GraphQLProjectConfig,
|
||||
Uri,
|
||||
} from 'graphql-language-service-types';
|
||||
import type {Position} from 'graphql-language-service-utils';
|
||||
import type {Hover} from 'vscode-languageserver-types';
|
||||
|
||||
import {Kind, parse, print} from 'graphql';
|
||||
import {getAutocompleteSuggestions} from './getAutocompleteSuggestions';
|
||||
import {getHoverInformation} from './getHoverInformation';
|
||||
import {validateQuery, getRange, SEVERITY} from './getDiagnostics';
|
||||
import {
|
||||
getDefinitionQueryResultForFragmentSpread,
|
||||
getDefinitionQueryResultForDefinitionNode,
|
||||
getDefinitionQueryResultForNamedType,
|
||||
} from './getDefinition';
|
||||
import {getASTNodeAtPosition} from 'graphql-language-service-utils';
|
||||
|
||||
const {
|
||||
FRAGMENT_DEFINITION,
|
||||
OBJECT_TYPE_DEFINITION,
|
||||
INTERFACE_TYPE_DEFINITION,
|
||||
ENUM_TYPE_DEFINITION,
|
||||
UNION_TYPE_DEFINITION,
|
||||
SCALAR_TYPE_DEFINITION,
|
||||
INPUT_OBJECT_TYPE_DEFINITION,
|
||||
SCALAR_TYPE_EXTENSION,
|
||||
OBJECT_TYPE_EXTENSION,
|
||||
INTERFACE_TYPE_EXTENSION,
|
||||
UNION_TYPE_EXTENSION,
|
||||
ENUM_TYPE_EXTENSION,
|
||||
INPUT_OBJECT_TYPE_EXTENSION,
|
||||
DIRECTIVE_DEFINITION,
|
||||
FRAGMENT_SPREAD,
|
||||
OPERATION_DEFINITION,
|
||||
NAMED_TYPE,
|
||||
} = Kind;
|
||||
|
||||
export class GraphQLLanguageService {
|
||||
_graphQLCache: GraphQLCache;
|
||||
_graphQLConfig: GraphQLConfig;
|
||||
|
||||
constructor(cache: GraphQLCache) {
|
||||
this._graphQLCache = cache;
|
||||
this._graphQLConfig = cache.getGraphQLConfig();
|
||||
}
|
||||
|
||||
async getDiagnostics(
|
||||
query: string,
|
||||
uri: Uri,
|
||||
isRelayCompatMode?: boolean,
|
||||
): Promise<Array<Diagnostic>> {
|
||||
// Perform syntax diagnostics first, as this doesn't require
|
||||
// schema/fragment definitions, even the project configuration.
|
||||
let queryHasExtensions = false;
|
||||
const projectConfig = this._graphQLConfig.getConfigForFile(uri);
|
||||
const schemaPath = projectConfig.schemaPath;
|
||||
try {
|
||||
const queryAST = parse(query);
|
||||
if (!schemaPath || uri !== schemaPath) {
|
||||
queryHasExtensions = queryAST.definitions.some(definition => {
|
||||
switch (definition.kind) {
|
||||
case OBJECT_TYPE_DEFINITION:
|
||||
case INTERFACE_TYPE_DEFINITION:
|
||||
case ENUM_TYPE_DEFINITION:
|
||||
case UNION_TYPE_DEFINITION:
|
||||
case SCALAR_TYPE_DEFINITION:
|
||||
case INPUT_OBJECT_TYPE_DEFINITION:
|
||||
case SCALAR_TYPE_EXTENSION:
|
||||
case OBJECT_TYPE_EXTENSION:
|
||||
case INTERFACE_TYPE_EXTENSION:
|
||||
case UNION_TYPE_EXTENSION:
|
||||
case ENUM_TYPE_EXTENSION:
|
||||
case INPUT_OBJECT_TYPE_EXTENSION:
|
||||
case DIRECTIVE_DEFINITION:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
const range = getRange(error.locations[0], query);
|
||||
return [
|
||||
{
|
||||
severity: SEVERITY.ERROR,
|
||||
message: error.message,
|
||||
source: 'GraphQL: Syntax',
|
||||
range,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// If there's a matching config, proceed to prepare to run validation
|
||||
let source = query;
|
||||
const fragmentDefinitions = await this._graphQLCache.getFragmentDefinitions(
|
||||
projectConfig,
|
||||
);
|
||||
const fragmentDependencies = await this._graphQLCache.getFragmentDependencies(
|
||||
query,
|
||||
fragmentDefinitions,
|
||||
);
|
||||
const dependenciesSource = fragmentDependencies.reduce(
|
||||
(prev, cur) => `${prev} ${print(cur.definition)}`,
|
||||
'',
|
||||
);
|
||||
|
||||
source = `${source} ${dependenciesSource}`;
|
||||
|
||||
let validationAst = null;
|
||||
try {
|
||||
validationAst = parse(source);
|
||||
} catch (error) {
|
||||
// the query string is already checked to be parsed properly - errors
|
||||
// from this parse must be from corrupted fragment dependencies.
|
||||
// For IDEs we don't care for errors outside of the currently edited
|
||||
// query, so we return an empty array here.
|
||||
return [];
|
||||
}
|
||||
|
||||
// Check if there are custom validation rules to be used
|
||||
let customRules;
|
||||
const customRulesModulePath =
|
||||
projectConfig.extensions.customValidationRules;
|
||||
if (customRulesModulePath) {
|
||||
/* eslint-disable no-implicit-coercion */
|
||||
const rulesPath = require.resolve(`${customRulesModulePath}`);
|
||||
if (rulesPath) {
|
||||
customRules = require(`${rulesPath}`)(this._graphQLConfig);
|
||||
}
|
||||
/* eslint-enable no-implicit-coercion */
|
||||
}
|
||||
|
||||
const schema = await this._graphQLCache
|
||||
.getSchema(projectConfig.projectName, queryHasExtensions)
|
||||
.catch(() => null);
|
||||
|
||||
if (!schema) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return validateQuery(validationAst, schema, customRules, isRelayCompatMode);
|
||||
}
|
||||
|
||||
async getAutocompleteSuggestions(
|
||||
query: string,
|
||||
position: Position,
|
||||
filePath: Uri,
|
||||
): Promise<Array<CompletionItem>> {
|
||||
const projectConfig = this._graphQLConfig.getConfigForFile(filePath);
|
||||
const schema = await this._graphQLCache
|
||||
.getSchema(projectConfig.projectName)
|
||||
.catch(() => null);
|
||||
|
||||
if (schema) {
|
||||
return getAutocompleteSuggestions(schema, query, position);
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
async getHoverInformation(
|
||||
query: string,
|
||||
position: Position,
|
||||
filePath: Uri,
|
||||
): Promise<Hover.contents> {
|
||||
const projectConfig = this._graphQLConfig.getConfigForFile(filePath);
|
||||
const schema = await this._graphQLCache
|
||||
.getSchema(projectConfig.projectName)
|
||||
.catch(() => null);
|
||||
|
||||
if (schema) {
|
||||
return getHoverInformation(schema, query, position);
|
||||
}
|
||||
return '';
|
||||
}
|
||||
|
||||
async getDefinition(
|
||||
query: string,
|
||||
position: Position,
|
||||
filePath: Uri,
|
||||
): Promise<?DefinitionQueryResult> {
|
||||
const projectConfig = this._graphQLConfig.getConfigForFile(filePath);
|
||||
|
||||
let ast;
|
||||
try {
|
||||
ast = parse(query);
|
||||
} catch (error) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const node = getASTNodeAtPosition(query, ast, position);
|
||||
if (node) {
|
||||
switch (node.kind) {
|
||||
case FRAGMENT_SPREAD:
|
||||
return this._getDefinitionForFragmentSpread(
|
||||
query,
|
||||
ast,
|
||||
node,
|
||||
filePath,
|
||||
projectConfig,
|
||||
);
|
||||
case FRAGMENT_DEFINITION:
|
||||
case OPERATION_DEFINITION:
|
||||
return getDefinitionQueryResultForDefinitionNode(
|
||||
filePath,
|
||||
query,
|
||||
(node: FragmentDefinitionNode | OperationDefinitionNode),
|
||||
);
|
||||
case NAMED_TYPE:
|
||||
return this._getDefinitionForNamedType(
|
||||
query,
|
||||
ast,
|
||||
node,
|
||||
filePath,
|
||||
projectConfig,
|
||||
);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async _getDefinitionForNamedType(
|
||||
query: string,
|
||||
ast: DocumentNode,
|
||||
node: NamedTypeNode,
|
||||
filePath: Uri,
|
||||
projectConfig: GraphQLProjectConfig,
|
||||
): Promise<?DefinitionQueryResult> {
|
||||
const objectTypeDefinitions = await this._graphQLCache.getObjectTypeDefinitions(
|
||||
projectConfig,
|
||||
);
|
||||
|
||||
const dependencies = await this._graphQLCache.getObjectTypeDependenciesForAST(
|
||||
ast,
|
||||
objectTypeDefinitions,
|
||||
);
|
||||
|
||||
const localObjectTypeDefinitions = ast.definitions.filter(
|
||||
definition =>
|
||||
definition.kind === OBJECT_TYPE_DEFINITION ||
|
||||
definition.kind === INPUT_OBJECT_TYPE_DEFINITION ||
|
||||
definition.kind === ENUM_TYPE_DEFINITION,
|
||||
);
|
||||
|
||||
const typeCastedDefs = ((localObjectTypeDefinitions: any): Array<
|
||||
TypeDefinitionNode,
|
||||
>);
|
||||
|
||||
const localOperationDefinationInfos = typeCastedDefs.map(
|
||||
(definition: TypeDefinitionNode) => ({
|
||||
filePath,
|
||||
content: query,
|
||||
definition,
|
||||
}),
|
||||
);
|
||||
|
||||
const result = await getDefinitionQueryResultForNamedType(
|
||||
query,
|
||||
node,
|
||||
dependencies.concat(localOperationDefinationInfos),
|
||||
);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
async _getDefinitionForFragmentSpread(
|
||||
query: string,
|
||||
ast: DocumentNode,
|
||||
node: FragmentSpreadNode,
|
||||
filePath: Uri,
|
||||
projectConfig: GraphQLProjectConfig,
|
||||
): Promise<?DefinitionQueryResult> {
|
||||
const fragmentDefinitions = await this._graphQLCache.getFragmentDefinitions(
|
||||
projectConfig,
|
||||
);
|
||||
|
||||
const dependencies = await this._graphQLCache.getFragmentDependenciesForAST(
|
||||
ast,
|
||||
fragmentDefinitions,
|
||||
);
|
||||
|
||||
const localFragDefinitions = ast.definitions.filter(
|
||||
definition => definition.kind === FRAGMENT_DEFINITION,
|
||||
);
|
||||
|
||||
const typeCastedDefs = ((localFragDefinitions: any): Array<
|
||||
FragmentDefinitionNode,
|
||||
>);
|
||||
|
||||
const localFragInfos = typeCastedDefs.map(
|
||||
(definition: FragmentDefinitionNode) => ({
|
||||
filePath,
|
||||
content: query,
|
||||
definition,
|
||||
}),
|
||||
);
|
||||
|
||||
const result = await getDefinitionQueryResultForFragmentSpread(
|
||||
query,
|
||||
node,
|
||||
dependencies.concat(localFragInfos),
|
||||
);
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
@ -1,204 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
import type {GraphQLField, GraphQLSchema, GraphQLType} from 'graphql';
|
||||
import {isCompositeType} from 'graphql';
|
||||
import {
|
||||
SchemaMetaFieldDef,
|
||||
TypeMetaFieldDef,
|
||||
TypeNameMetaFieldDef,
|
||||
} from 'graphql/type/introspection';
|
||||
import type {
|
||||
CompletionItem,
|
||||
ContextToken,
|
||||
State,
|
||||
TypeInfo,
|
||||
} from 'graphql-language-service-types';
|
||||
|
||||
// Utility for returning the state representing the Definition this token state
|
||||
// is within, if any.
|
||||
export function getDefinitionState(tokenState: State): ?State {
|
||||
let definitionState;
|
||||
|
||||
forEachState(tokenState, state => {
|
||||
switch (state.kind) {
|
||||
case 'Query':
|
||||
case 'ShortQuery':
|
||||
case 'Mutation':
|
||||
case 'Subscription':
|
||||
case 'FragmentDefinition':
|
||||
definitionState = state;
|
||||
break;
|
||||
}
|
||||
});
|
||||
|
||||
return definitionState;
|
||||
}
|
||||
|
||||
// Gets the field definition given a type and field name
|
||||
export function getFieldDef(
|
||||
schema: GraphQLSchema,
|
||||
type: GraphQLType,
|
||||
fieldName: string,
|
||||
): ?GraphQLField<*, *> {
|
||||
if (fieldName === SchemaMetaFieldDef.name && schema.getQueryType() === type) {
|
||||
return SchemaMetaFieldDef;
|
||||
}
|
||||
if (fieldName === TypeMetaFieldDef.name && schema.getQueryType() === type) {
|
||||
return TypeMetaFieldDef;
|
||||
}
|
||||
if (fieldName === TypeNameMetaFieldDef.name && isCompositeType(type)) {
|
||||
return TypeNameMetaFieldDef;
|
||||
}
|
||||
if (type.getFields && typeof type.getFields === 'function') {
|
||||
return (type.getFields()[fieldName]: any);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// Utility for iterating through a CodeMirror parse state stack bottom-up.
|
||||
export function forEachState(
|
||||
stack: State,
|
||||
fn: (state: State) => ?TypeInfo,
|
||||
): void {
|
||||
const reverseStateStack = [];
|
||||
let state = stack;
|
||||
while (state && state.kind) {
|
||||
reverseStateStack.push(state);
|
||||
state = state.prevState;
|
||||
}
|
||||
for (let i = reverseStateStack.length - 1; i >= 0; i--) {
|
||||
fn(reverseStateStack[i]);
|
||||
}
|
||||
}
|
||||
|
||||
export function objectValues(object: Object): Array<any> {
|
||||
const keys = Object.keys(object);
|
||||
const len = keys.length;
|
||||
const values = new Array(len);
|
||||
for (let i = 0; i < len; ++i) {
|
||||
values[i] = object[keys[i]];
|
||||
}
|
||||
return values;
|
||||
}
|
||||
|
||||
// Create the expected hint response given a possible list and a token
|
||||
export function hintList(
|
||||
token: ContextToken,
|
||||
list: Array<CompletionItem>,
|
||||
): Array<CompletionItem> {
|
||||
return filterAndSortList(list, normalizeText(token.string));
|
||||
}
|
||||
|
||||
// Given a list of hint entries and currently typed text, sort and filter to
|
||||
// provide a concise list.
|
||||
function filterAndSortList(
|
||||
list: Array<CompletionItem>,
|
||||
text: string,
|
||||
): Array<CompletionItem> {
|
||||
if (!text) {
|
||||
return filterNonEmpty(list, entry => !entry.isDeprecated);
|
||||
}
|
||||
|
||||
const byProximity = list.map(entry => ({
|
||||
proximity: getProximity(normalizeText(entry.label), text),
|
||||
entry,
|
||||
}));
|
||||
|
||||
const conciseMatches = filterNonEmpty(
|
||||
filterNonEmpty(byProximity, pair => pair.proximity <= 2),
|
||||
pair => !pair.entry.isDeprecated,
|
||||
);
|
||||
|
||||
const sortedMatches = conciseMatches.sort(
|
||||
(a, b) =>
|
||||
(a.entry.isDeprecated ? 1 : 0) - (b.entry.isDeprecated ? 1 : 0) ||
|
||||
a.proximity - b.proximity ||
|
||||
a.entry.label.length - b.entry.label.length,
|
||||
);
|
||||
|
||||
return sortedMatches.map(pair => pair.entry);
|
||||
}
|
||||
|
||||
// Filters the array by the predicate, unless it results in an empty array,
|
||||
// in which case return the original array.
|
||||
function filterNonEmpty(
|
||||
array: Array<Object>,
|
||||
predicate: (entry: Object) => boolean,
|
||||
): Array<Object> {
|
||||
const filtered = array.filter(predicate);
|
||||
return filtered.length === 0 ? array : filtered;
|
||||
}
|
||||
|
||||
function normalizeText(text: string): string {
|
||||
return text.toLowerCase().replace(/\W/g, '');
|
||||
}
|
||||
|
||||
// Determine a numeric proximity for a suggestion based on current text.
|
||||
function getProximity(suggestion: string, text: string): number {
|
||||
// start with lexical distance
|
||||
let proximity = lexicalDistance(text, suggestion);
|
||||
if (suggestion.length > text.length) {
|
||||
// do not penalize long suggestions.
|
||||
proximity -= suggestion.length - text.length - 1;
|
||||
// penalize suggestions not starting with this phrase
|
||||
proximity += suggestion.indexOf(text) === 0 ? 0 : 0.5;
|
||||
}
|
||||
return proximity;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the lexical distance between strings A and B.
|
||||
*
|
||||
* The "distance" between two strings is given by counting the minimum number
|
||||
* of edits needed to transform string A into string B. An edit can be an
|
||||
* insertion, deletion, or substitution of a single character, or a swap of two
|
||||
* adjacent characters.
|
||||
*
|
||||
* This distance can be useful for detecting typos in input or sorting
|
||||
*
|
||||
* @param {string} a
|
||||
* @param {string} b
|
||||
* @return {int} distance in number of edits
|
||||
*/
|
||||
function lexicalDistance(a: string, b: string): number {
|
||||
let i;
|
||||
let j;
|
||||
const d = [];
|
||||
const aLength = a.length;
|
||||
const bLength = b.length;
|
||||
|
||||
for (i = 0; i <= aLength; i++) {
|
||||
d[i] = [i];
|
||||
}
|
||||
|
||||
for (j = 1; j <= bLength; j++) {
|
||||
d[0][j] = j;
|
||||
}
|
||||
|
||||
for (i = 1; i <= aLength; i++) {
|
||||
for (j = 1; j <= bLength; j++) {
|
||||
const cost = a[i - 1] === b[j - 1] ? 0 : 1;
|
||||
|
||||
d[i][j] = Math.min(
|
||||
d[i - 1][j] + 1,
|
||||
d[i][j - 1] + 1,
|
||||
d[i - 1][j - 1] + cost,
|
||||
);
|
||||
|
||||
if (i > 1 && j > 1 && a[i - 1] === b[j - 2] && a[i - 2] === b[j - 1]) {
|
||||
d[i][j] = Math.min(d[i][j], d[i - 2][j - 2] + cost);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return d[aLength][bLength];
|
||||
}
|
@ -1,665 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
import type {
|
||||
FragmentDefinitionNode,
|
||||
GraphQLDirective,
|
||||
GraphQLSchema,
|
||||
} from 'graphql';
|
||||
import type {
|
||||
CompletionItem,
|
||||
ContextToken,
|
||||
State,
|
||||
TypeInfo,
|
||||
} from 'graphql-language-service-types';
|
||||
import type {Position} from 'graphql-language-service-utils';
|
||||
|
||||
import {
|
||||
GraphQLBoolean,
|
||||
GraphQLEnumType,
|
||||
GraphQLInputObjectType,
|
||||
GraphQLList,
|
||||
SchemaMetaFieldDef,
|
||||
TypeMetaFieldDef,
|
||||
TypeNameMetaFieldDef,
|
||||
assertAbstractType,
|
||||
doTypesOverlap,
|
||||
getNamedType,
|
||||
getNullableType,
|
||||
isAbstractType,
|
||||
isCompositeType,
|
||||
isInputType,
|
||||
} from 'graphql';
|
||||
import {CharacterStream, onlineParser} from 'graphql-language-service-parser';
|
||||
import {
|
||||
forEachState,
|
||||
getDefinitionState,
|
||||
getFieldDef,
|
||||
hintList,
|
||||
objectValues,
|
||||
} from './autocompleteUtils';
|
||||
|
||||
/**
|
||||
* Given GraphQLSchema, queryText, and context of the current position within
|
||||
* the source text, provide a list of typeahead entries.
|
||||
*/
|
||||
export function getAutocompleteSuggestions(
|
||||
schema: GraphQLSchema,
|
||||
queryText: string,
|
||||
cursor: Position,
|
||||
contextToken?: ContextToken,
|
||||
): Array<CompletionItem> {
|
||||
const token = contextToken || getTokenAtPosition(queryText, cursor);
|
||||
|
||||
const state =
|
||||
token.state.kind === 'Invalid' ? token.state.prevState : token.state;
|
||||
|
||||
// relieve flow errors by checking if `state` exists
|
||||
if (!state) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const kind = state.kind;
|
||||
const step = state.step;
|
||||
const typeInfo = getTypeInfo(schema, token.state);
|
||||
|
||||
// Definition kinds
|
||||
if (kind === 'Document') {
|
||||
return hintList(token, [
|
||||
{label: 'query'},
|
||||
{label: 'mutation'},
|
||||
{label: 'subscription'},
|
||||
{label: 'fragment'},
|
||||
{label: '{'},
|
||||
]);
|
||||
}
|
||||
|
||||
// Field names
|
||||
if (kind === 'SelectionSet' || kind === 'Field' || kind === 'AliasedField') {
|
||||
return getSuggestionsForFieldNames(token, typeInfo, schema);
|
||||
}
|
||||
|
||||
// Argument names
|
||||
if (kind === 'Arguments' || (kind === 'Argument' && step === 0)) {
|
||||
const argDefs = typeInfo.argDefs;
|
||||
if (argDefs) {
|
||||
return hintList(
|
||||
token,
|
||||
argDefs.map(argDef => ({
|
||||
label: argDef.name,
|
||||
detail: String(argDef.type),
|
||||
documentation: argDef.description,
|
||||
})),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Input Object fields
|
||||
if (kind === 'ObjectValue' || (kind === 'ObjectField' && step === 0)) {
|
||||
if (typeInfo.objectFieldDefs) {
|
||||
const objectFields = objectValues(typeInfo.objectFieldDefs);
|
||||
return hintList(
|
||||
token,
|
||||
objectFields.map(field => ({
|
||||
label: field.name,
|
||||
detail: String(field.type),
|
||||
documentation: field.description,
|
||||
})),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Input values: Enum and Boolean
|
||||
if (
|
||||
kind === 'EnumValue' ||
|
||||
(kind === 'ListValue' && step === 1) ||
|
||||
(kind === 'ObjectField' && step === 2) ||
|
||||
(kind === 'Argument' && step === 2)
|
||||
) {
|
||||
return getSuggestionsForInputValues(token, typeInfo);
|
||||
}
|
||||
|
||||
// Fragment type conditions
|
||||
if (
|
||||
(kind === 'TypeCondition' && step === 1) ||
|
||||
(kind === 'NamedType' &&
|
||||
state.prevState != null &&
|
||||
state.prevState.kind === 'TypeCondition')
|
||||
) {
|
||||
return getSuggestionsForFragmentTypeConditions(token, typeInfo, schema);
|
||||
}
|
||||
|
||||
// Fragment spread names
|
||||
if (kind === 'FragmentSpread' && step === 1) {
|
||||
return getSuggestionsForFragmentSpread(token, typeInfo, schema, queryText);
|
||||
}
|
||||
|
||||
// Variable definition types
|
||||
if (
|
||||
(kind === 'VariableDefinition' && step === 2) ||
|
||||
(kind === 'ListType' && step === 1) ||
|
||||
(kind === 'NamedType' &&
|
||||
state.prevState &&
|
||||
(state.prevState.kind === 'VariableDefinition' ||
|
||||
state.prevState.kind === 'ListType'))
|
||||
) {
|
||||
return getSuggestionsForVariableDefinition(token, schema);
|
||||
}
|
||||
|
||||
// Directive names
|
||||
if (kind === 'Directive') {
|
||||
return getSuggestionsForDirective(token, state, schema);
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
// Helper functions to get suggestions for each kinds
|
||||
function getSuggestionsForFieldNames(
|
||||
token: ContextToken,
|
||||
typeInfo: TypeInfo,
|
||||
schema: GraphQLSchema,
|
||||
): Array<CompletionItem> {
|
||||
if (typeInfo.parentType) {
|
||||
const parentType = typeInfo.parentType;
|
||||
const fields =
|
||||
parentType.getFields instanceof Function
|
||||
? objectValues(parentType.getFields())
|
||||
: [];
|
||||
if (isAbstractType(parentType)) {
|
||||
fields.push(TypeNameMetaFieldDef);
|
||||
}
|
||||
if (parentType === schema.getQueryType()) {
|
||||
fields.push(SchemaMetaFieldDef, TypeMetaFieldDef);
|
||||
}
|
||||
return hintList(
|
||||
token,
|
||||
fields.map(field => ({
|
||||
label: field.name,
|
||||
detail: String(field.type),
|
||||
documentation: field.description,
|
||||
isDeprecated: field.isDeprecated,
|
||||
deprecationReason: field.deprecationReason,
|
||||
})),
|
||||
);
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
function getSuggestionsForInputValues(
|
||||
token: ContextToken,
|
||||
typeInfo: TypeInfo,
|
||||
): Array<CompletionItem> {
|
||||
const namedInputType = getNamedType(typeInfo.inputType);
|
||||
if (namedInputType instanceof GraphQLEnumType) {
|
||||
const values = namedInputType.getValues();
|
||||
return hintList(
|
||||
token,
|
||||
values.map(value => ({
|
||||
label: value.name,
|
||||
detail: String(namedInputType),
|
||||
documentation: value.description,
|
||||
isDeprecated: value.isDeprecated,
|
||||
deprecationReason: value.deprecationReason,
|
||||
})),
|
||||
);
|
||||
} else if (namedInputType === GraphQLBoolean) {
|
||||
return hintList(token, [
|
||||
{
|
||||
label: 'true',
|
||||
detail: String(GraphQLBoolean),
|
||||
documentation: 'Not false.',
|
||||
},
|
||||
{
|
||||
label: 'false',
|
||||
detail: String(GraphQLBoolean),
|
||||
documentation: 'Not true.',
|
||||
},
|
||||
]);
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
function getSuggestionsForFragmentTypeConditions(
|
||||
token: ContextToken,
|
||||
typeInfo: TypeInfo,
|
||||
schema: GraphQLSchema,
|
||||
): Array<CompletionItem> {
|
||||
let possibleTypes;
|
||||
if (typeInfo.parentType) {
|
||||
if (isAbstractType(typeInfo.parentType)) {
|
||||
const abstractType = assertAbstractType(typeInfo.parentType);
|
||||
// Collect both the possible Object types as well as the interfaces
|
||||
// they implement.
|
||||
const possibleObjTypes = schema.getPossibleTypes(abstractType);
|
||||
const possibleIfaceMap = Object.create(null);
|
||||
possibleObjTypes.forEach(type => {
|
||||
type.getInterfaces().forEach(iface => {
|
||||
possibleIfaceMap[iface.name] = iface;
|
||||
});
|
||||
});
|
||||
possibleTypes = possibleObjTypes.concat(objectValues(possibleIfaceMap));
|
||||
} else {
|
||||
// The parent type is a non-abstract Object type, so the only possible
|
||||
// type that can be used is that same type.
|
||||
possibleTypes = [typeInfo.parentType];
|
||||
}
|
||||
} else {
|
||||
const typeMap = schema.getTypeMap();
|
||||
possibleTypes = objectValues(typeMap).filter(isCompositeType);
|
||||
}
|
||||
return hintList(
|
||||
token,
|
||||
possibleTypes.map(type => {
|
||||
const namedType = getNamedType(type);
|
||||
return {
|
||||
label: String(type),
|
||||
documentation: (namedType && namedType.description) || '',
|
||||
};
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
function getSuggestionsForFragmentSpread(
|
||||
token: ContextToken,
|
||||
typeInfo: TypeInfo,
|
||||
schema: GraphQLSchema,
|
||||
queryText: string,
|
||||
): Array<CompletionItem> {
|
||||
const typeMap = schema.getTypeMap();
|
||||
const defState = getDefinitionState(token.state);
|
||||
const fragments = getFragmentDefinitions(queryText);
|
||||
|
||||
// Filter down to only the fragments which may exist here.
|
||||
const relevantFrags = fragments.filter(
|
||||
frag =>
|
||||
// Only include fragments with known types.
|
||||
typeMap[frag.typeCondition.name.value] &&
|
||||
// Only include fragments which are not cyclic.
|
||||
!(
|
||||
defState &&
|
||||
defState.kind === 'FragmentDefinition' &&
|
||||
defState.name === frag.name.value
|
||||
) &&
|
||||
// Only include fragments which could possibly be spread here.
|
||||
isCompositeType(typeInfo.parentType) &&
|
||||
isCompositeType(typeMap[frag.typeCondition.name.value]) &&
|
||||
doTypesOverlap(
|
||||
schema,
|
||||
typeInfo.parentType,
|
||||
typeMap[frag.typeCondition.name.value],
|
||||
),
|
||||
);
|
||||
|
||||
return hintList(
|
||||
token,
|
||||
relevantFrags.map(frag => ({
|
||||
label: frag.name.value,
|
||||
detail: String(typeMap[frag.typeCondition.name.value]),
|
||||
documentation: `fragment ${frag.name.value} on ${
|
||||
frag.typeCondition.name.value
|
||||
}`,
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
function getFragmentDefinitions(
|
||||
queryText: string,
|
||||
): Array<FragmentDefinitionNode> {
|
||||
const fragmentDefs = [];
|
||||
runOnlineParser(queryText, (_, state) => {
|
||||
if (state.kind === 'FragmentDefinition' && state.name && state.type) {
|
||||
fragmentDefs.push({
|
||||
kind: 'FragmentDefinition',
|
||||
name: {
|
||||
kind: 'Name',
|
||||
value: state.name,
|
||||
},
|
||||
selectionSet: {
|
||||
kind: 'SelectionSet',
|
||||
selections: [],
|
||||
},
|
||||
typeCondition: {
|
||||
kind: 'NamedType',
|
||||
name: {
|
||||
kind: 'Name',
|
||||
value: state.type,
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
return fragmentDefs;
|
||||
}
|
||||
|
||||
function getSuggestionsForVariableDefinition(
|
||||
token: ContextToken,
|
||||
schema: GraphQLSchema,
|
||||
): Array<CompletionItem> {
|
||||
const inputTypeMap = schema.getTypeMap();
|
||||
const inputTypes = objectValues(inputTypeMap).filter(isInputType);
|
||||
return hintList(
|
||||
token,
|
||||
inputTypes.map(type => ({
|
||||
label: type.name,
|
||||
documentation: type.description,
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
function getSuggestionsForDirective(
|
||||
token: ContextToken,
|
||||
state: State,
|
||||
schema: GraphQLSchema,
|
||||
): Array<CompletionItem> {
|
||||
if (state.prevState && state.prevState.kind) {
|
||||
const directives = schema
|
||||
.getDirectives()
|
||||
.filter(directive => canUseDirective(state.prevState, directive));
|
||||
return hintList(
|
||||
token,
|
||||
directives.map(directive => ({
|
||||
label: directive.name,
|
||||
documentation: directive.description || '',
|
||||
})),
|
||||
);
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
export function getTokenAtPosition(
|
||||
queryText: string,
|
||||
cursor: Position,
|
||||
): ContextToken {
|
||||
let styleAtCursor = null;
|
||||
let stateAtCursor = null;
|
||||
let stringAtCursor = null;
|
||||
const token = runOnlineParser(queryText, (stream, state, style, index) => {
|
||||
if (index === cursor.line) {
|
||||
if (stream.getCurrentPosition() >= cursor.character) {
|
||||
styleAtCursor = style;
|
||||
stateAtCursor = {...state};
|
||||
stringAtCursor = stream.current();
|
||||
return 'BREAK';
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Return the state/style of parsed token in case those at cursor aren't
|
||||
// available.
|
||||
return {
|
||||
start: token.start,
|
||||
end: token.end,
|
||||
string: stringAtCursor || token.string,
|
||||
state: stateAtCursor || token.state,
|
||||
style: styleAtCursor || token.style,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides an utility function to parse a given query text and construct a
|
||||
* `token` context object.
|
||||
* A token context provides useful information about the token/style that
|
||||
* CharacterStream currently possesses, as well as the end state and style
|
||||
* of the token.
|
||||
*/
|
||||
type callbackFnType = (
|
||||
stream: CharacterStream,
|
||||
state: State,
|
||||
style: string,
|
||||
index: number,
|
||||
) => void | 'BREAK';
|
||||
|
||||
function runOnlineParser(
|
||||
queryText: string,
|
||||
callback: callbackFnType,
|
||||
): ContextToken {
|
||||
const lines = queryText.split('\n');
|
||||
const parser = onlineParser();
|
||||
let state = parser.startState();
|
||||
let style = '';
|
||||
|
||||
let stream: CharacterStream = new CharacterStream('');
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
stream = new CharacterStream(lines[i]);
|
||||
while (!stream.eol()) {
|
||||
style = parser.token(stream, state);
|
||||
const code = callback(stream, state, style, i);
|
||||
if (code === 'BREAK') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Above while loop won't run if there is an empty line.
|
||||
// Run the callback one more time to catch this.
|
||||
callback(stream, state, style, i);
|
||||
|
||||
if (!state.kind) {
|
||||
state = parser.startState();
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
start: stream.getStartOfToken(),
|
||||
end: stream.getCurrentPosition(),
|
||||
string: stream.current(),
|
||||
state,
|
||||
style,
|
||||
};
|
||||
}
|
||||
|
||||
function canUseDirective(
|
||||
state: $PropertyType<State, 'prevState'>,
|
||||
directive: GraphQLDirective,
|
||||
): boolean {
|
||||
if (!state || !state.kind) {
|
||||
return false;
|
||||
}
|
||||
const kind = state.kind;
|
||||
const locations = directive.locations;
|
||||
switch (kind) {
|
||||
case 'Query':
|
||||
return locations.indexOf('QUERY') !== -1;
|
||||
case 'Mutation':
|
||||
return locations.indexOf('MUTATION') !== -1;
|
||||
case 'Subscription':
|
||||
return locations.indexOf('SUBSCRIPTION') !== -1;
|
||||
case 'Field':
|
||||
case 'AliasedField':
|
||||
return locations.indexOf('FIELD') !== -1;
|
||||
case 'FragmentDefinition':
|
||||
return locations.indexOf('FRAGMENT_DEFINITION') !== -1;
|
||||
case 'FragmentSpread':
|
||||
return locations.indexOf('FRAGMENT_SPREAD') !== -1;
|
||||
case 'InlineFragment':
|
||||
return locations.indexOf('INLINE_FRAGMENT') !== -1;
|
||||
|
||||
// Schema Definitions
|
||||
case 'SchemaDef':
|
||||
return locations.indexOf('SCHEMA') !== -1;
|
||||
case 'ScalarDef':
|
||||
return locations.indexOf('SCALAR') !== -1;
|
||||
case 'ObjectTypeDef':
|
||||
return locations.indexOf('OBJECT') !== -1;
|
||||
case 'FieldDef':
|
||||
return locations.indexOf('FIELD_DEFINITION') !== -1;
|
||||
case 'InterfaceDef':
|
||||
return locations.indexOf('INTERFACE') !== -1;
|
||||
case 'UnionDef':
|
||||
return locations.indexOf('UNION') !== -1;
|
||||
case 'EnumDef':
|
||||
return locations.indexOf('ENUM') !== -1;
|
||||
case 'EnumValue':
|
||||
return locations.indexOf('ENUM_VALUE') !== -1;
|
||||
case 'InputDef':
|
||||
return locations.indexOf('INPUT_OBJECT') !== -1;
|
||||
case 'InputValueDef':
|
||||
const prevStateKind = state.prevState && state.prevState.kind;
|
||||
switch (prevStateKind) {
|
||||
case 'ArgumentsDef':
|
||||
return locations.indexOf('ARGUMENT_DEFINITION') !== -1;
|
||||
case 'InputDef':
|
||||
return locations.indexOf('INPUT_FIELD_DEFINITION') !== -1;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Utility for collecting rich type information given any token's state
|
||||
// from the graphql-mode parser.
|
||||
export function getTypeInfo(
|
||||
schema: GraphQLSchema,
|
||||
tokenState: State,
|
||||
): TypeInfo {
|
||||
let argDef;
|
||||
let argDefs;
|
||||
let directiveDef;
|
||||
let enumValue;
|
||||
let fieldDef;
|
||||
let inputType;
|
||||
let objectFieldDefs;
|
||||
let parentType;
|
||||
let type;
|
||||
|
||||
forEachState(tokenState, state => {
|
||||
switch (state.kind) {
|
||||
case 'Query':
|
||||
case 'ShortQuery':
|
||||
type = schema.getQueryType();
|
||||
break;
|
||||
case 'Mutation':
|
||||
type = schema.getMutationType();
|
||||
break;
|
||||
case 'Subscription':
|
||||
type = schema.getSubscriptionType();
|
||||
break;
|
||||
case 'InlineFragment':
|
||||
case 'FragmentDefinition':
|
||||
if (state.type) {
|
||||
type = schema.getType(state.type);
|
||||
}
|
||||
break;
|
||||
case 'Field':
|
||||
case 'AliasedField':
|
||||
if (!type || !state.name) {
|
||||
fieldDef = null;
|
||||
} else {
|
||||
fieldDef = parentType
|
||||
? getFieldDef(schema, parentType, state.name)
|
||||
: null;
|
||||
type = fieldDef ? fieldDef.type : null;
|
||||
}
|
||||
break;
|
||||
case 'SelectionSet':
|
||||
parentType = getNamedType(type);
|
||||
break;
|
||||
case 'Directive':
|
||||
directiveDef = state.name ? schema.getDirective(state.name) : null;
|
||||
break;
|
||||
case 'Arguments':
|
||||
if (!state.prevState) {
|
||||
argDefs = null;
|
||||
} else {
|
||||
switch (state.prevState.kind) {
|
||||
case 'Field':
|
||||
argDefs = fieldDef && fieldDef.args;
|
||||
break;
|
||||
case 'Directive':
|
||||
argDefs = directiveDef && directiveDef.args;
|
||||
break;
|
||||
case 'AliasedField':
|
||||
const name = state.prevState && state.prevState.name;
|
||||
if (!name) {
|
||||
argDefs = null;
|
||||
break;
|
||||
}
|
||||
const field = parentType
|
||||
? getFieldDef(schema, parentType, name)
|
||||
: null;
|
||||
if (!field) {
|
||||
argDefs = null;
|
||||
break;
|
||||
}
|
||||
argDefs = field.args;
|
||||
break;
|
||||
default:
|
||||
argDefs = null;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 'Argument':
|
||||
if (argDefs) {
|
||||
for (let i = 0; i < argDefs.length; i++) {
|
||||
if (argDefs[i].name === state.name) {
|
||||
argDef = argDefs[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
inputType = argDef && argDef.type;
|
||||
break;
|
||||
case 'EnumValue':
|
||||
const enumType = getNamedType(inputType);
|
||||
enumValue =
|
||||
enumType instanceof GraphQLEnumType
|
||||
? find(enumType.getValues(), val => val.value === state.name)
|
||||
: null;
|
||||
break;
|
||||
case 'ListValue':
|
||||
const nullableType = getNullableType(inputType);
|
||||
inputType =
|
||||
nullableType instanceof GraphQLList ? nullableType.ofType : null;
|
||||
break;
|
||||
case 'ObjectValue':
|
||||
const objectType = getNamedType(inputType);
|
||||
objectFieldDefs =
|
||||
objectType instanceof GraphQLInputObjectType
|
||||
? objectType.getFields()
|
||||
: null;
|
||||
break;
|
||||
case 'ObjectField':
|
||||
const objectField =
|
||||
state.name && objectFieldDefs ? objectFieldDefs[state.name] : null;
|
||||
inputType = objectField && objectField.type;
|
||||
break;
|
||||
case 'NamedType':
|
||||
if (state.name) {
|
||||
type = schema.getType(state.name);
|
||||
}
|
||||
break;
|
||||
}
|
||||
});
|
||||
|
||||
return {
|
||||
argDef,
|
||||
argDefs,
|
||||
directiveDef,
|
||||
enumValue,
|
||||
fieldDef,
|
||||
inputType,
|
||||
objectFieldDefs,
|
||||
parentType,
|
||||
type,
|
||||
};
|
||||
}
|
||||
|
||||
// Returns the first item in the array which causes predicate to return truthy.
|
||||
function find(array, predicate) {
|
||||
for (let i = 0; i < array.length; i++) {
|
||||
if (predicate(array[i])) {
|
||||
return array[i];
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
@ -1,136 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
import type {
|
||||
ASTNode,
|
||||
FragmentSpreadNode,
|
||||
FragmentDefinitionNode,
|
||||
OperationDefinitionNode,
|
||||
NamedTypeNode,
|
||||
TypeDefinitionNode,
|
||||
} from 'graphql';
|
||||
import type {
|
||||
Definition,
|
||||
DefinitionQueryResult,
|
||||
FragmentInfo,
|
||||
Position,
|
||||
Range,
|
||||
Uri,
|
||||
ObjectTypeInfo,
|
||||
} from 'graphql-language-service-types';
|
||||
import {locToRange, offsetToPosition} from 'graphql-language-service-utils';
|
||||
import invariant from 'assert';
|
||||
|
||||
export const LANGUAGE = 'GraphQL';
|
||||
|
||||
function getRange(text: string, node: ASTNode): Range {
|
||||
const location = node.loc;
|
||||
invariant(location, 'Expected ASTNode to have a location.');
|
||||
return locToRange(text, location);
|
||||
}
|
||||
|
||||
function getPosition(text: string, node: ASTNode): Position {
|
||||
const location = node.loc;
|
||||
invariant(location, 'Expected ASTNode to have a location.');
|
||||
return offsetToPosition(text, location.start);
|
||||
}
|
||||
|
||||
export async function getDefinitionQueryResultForNamedType(
|
||||
text: string,
|
||||
node: NamedTypeNode,
|
||||
dependencies: Array<ObjectTypeInfo>,
|
||||
): Promise<DefinitionQueryResult> {
|
||||
const name = node.name.value;
|
||||
const defNodes = dependencies.filter(
|
||||
({definition}) => definition.name && definition.name.value === name,
|
||||
);
|
||||
if (defNodes.length === 0) {
|
||||
process.stderr.write(`Definition not found for GraphQL type ${name}`);
|
||||
return {queryRange: [], definitions: []};
|
||||
}
|
||||
const definitions: Array<Definition> = defNodes.map(
|
||||
({filePath, content, definition}) =>
|
||||
getDefinitionForNodeDefinition(filePath || '', content, definition),
|
||||
);
|
||||
return {
|
||||
definitions,
|
||||
queryRange: definitions.map(_ => getRange(text, node)),
|
||||
};
|
||||
}
|
||||
|
||||
export async function getDefinitionQueryResultForFragmentSpread(
|
||||
text: string,
|
||||
fragment: FragmentSpreadNode,
|
||||
dependencies: Array<FragmentInfo>,
|
||||
): Promise<DefinitionQueryResult> {
|
||||
const name = fragment.name.value;
|
||||
const defNodes = dependencies.filter(
|
||||
({definition}) => definition.name.value === name,
|
||||
);
|
||||
if (defNodes.length === 0) {
|
||||
process.stderr.write(`Definition not found for GraphQL fragment ${name}`);
|
||||
return {queryRange: [], definitions: []};
|
||||
}
|
||||
const definitions: Array<Definition> = defNodes.map(
|
||||
({filePath, content, definition}) =>
|
||||
getDefinitionForFragmentDefinition(filePath || '', content, definition),
|
||||
);
|
||||
return {
|
||||
definitions,
|
||||
queryRange: definitions.map(_ => getRange(text, fragment)),
|
||||
};
|
||||
}
|
||||
|
||||
export function getDefinitionQueryResultForDefinitionNode(
|
||||
path: Uri,
|
||||
text: string,
|
||||
definition: FragmentDefinitionNode | OperationDefinitionNode,
|
||||
): DefinitionQueryResult {
|
||||
return {
|
||||
definitions: [getDefinitionForFragmentDefinition(path, text, definition)],
|
||||
queryRange: definition.name ? [getRange(text, definition.name)] : [],
|
||||
};
|
||||
}
|
||||
|
||||
function getDefinitionForFragmentDefinition(
|
||||
path: Uri,
|
||||
text: string,
|
||||
definition: FragmentDefinitionNode | OperationDefinitionNode,
|
||||
): Definition {
|
||||
const name = definition.name;
|
||||
invariant(name, 'Expected ASTNode to have a Name.');
|
||||
return {
|
||||
path,
|
||||
position: getPosition(text, definition),
|
||||
range: getRange(text, definition),
|
||||
name: name.value || '',
|
||||
language: LANGUAGE,
|
||||
// This is a file inside the project root, good enough for now
|
||||
projectRoot: path,
|
||||
};
|
||||
}
|
||||
|
||||
function getDefinitionForNodeDefinition(
|
||||
path: Uri,
|
||||
text: string,
|
||||
definition: TypeDefinitionNode,
|
||||
): Definition {
|
||||
const name = definition.name;
|
||||
invariant(name, 'Expected ASTNode to have a Name.');
|
||||
return {
|
||||
path,
|
||||
position: getPosition(text, definition),
|
||||
range: getRange(text, definition),
|
||||
name: name.value || '',
|
||||
language: LANGUAGE,
|
||||
// This is a file inside the project root, good enough for now
|
||||
projectRoot: path,
|
||||
};
|
||||
}
|
@ -1,172 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
import type {
|
||||
ASTNode,
|
||||
DocumentNode,
|
||||
GraphQLError,
|
||||
GraphQLSchema,
|
||||
Location,
|
||||
SourceLocation,
|
||||
} from 'graphql';
|
||||
import type {
|
||||
Diagnostic,
|
||||
CustomValidationRule,
|
||||
} from 'graphql-language-service-types';
|
||||
|
||||
import invariant from 'assert';
|
||||
import {findDeprecatedUsages, parse} from 'graphql';
|
||||
import {CharacterStream, onlineParser} from 'graphql-language-service-parser';
|
||||
import {
|
||||
Position,
|
||||
Range,
|
||||
validateWithCustomRules,
|
||||
} from 'graphql-language-service-utils';
|
||||
|
||||
export const SEVERITY = {
|
||||
ERROR: 1,
|
||||
WARNING: 2,
|
||||
INFORMATION: 3,
|
||||
HINT: 4,
|
||||
};
|
||||
|
||||
export function getDiagnostics(
|
||||
query: string,
|
||||
schema: ?GraphQLSchema = null,
|
||||
customRules?: Array<CustomValidationRule>,
|
||||
isRelayCompatMode?: boolean,
|
||||
): Array<Diagnostic> {
|
||||
let ast = null;
|
||||
try {
|
||||
ast = parse(query);
|
||||
} catch (error) {
|
||||
const range = getRange(error.locations[0], query);
|
||||
return [
|
||||
{
|
||||
severity: SEVERITY.ERROR,
|
||||
message: error.message,
|
||||
source: 'GraphQL: Syntax',
|
||||
range,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
return validateQuery(ast, schema, customRules, isRelayCompatMode);
|
||||
}
|
||||
|
||||
export function validateQuery(
|
||||
ast: DocumentNode,
|
||||
schema: ?GraphQLSchema = null,
|
||||
customRules?: Array<CustomValidationRule>,
|
||||
isRelayCompatMode?: boolean,
|
||||
): Array<Diagnostic> {
|
||||
// We cannot validate the query unless a schema is provided.
|
||||
if (!schema) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const validationErrorAnnotations = mapCat(
|
||||
validateWithCustomRules(schema, ast, customRules, isRelayCompatMode),
|
||||
error => annotations(error, SEVERITY.ERROR, 'Validation'),
|
||||
);
|
||||
// Note: findDeprecatedUsages was added in graphql@0.9.0, but we want to
|
||||
// support older versions of graphql-js.
|
||||
const deprecationWarningAnnotations = !findDeprecatedUsages
|
||||
? []
|
||||
: mapCat(findDeprecatedUsages(schema, ast), error =>
|
||||
annotations(error, SEVERITY.WARNING, 'Deprecation'),
|
||||
);
|
||||
return validationErrorAnnotations.concat(deprecationWarningAnnotations);
|
||||
}
|
||||
|
||||
// General utility for map-cating (aka flat-mapping).
|
||||
function mapCat<T>(
|
||||
array: Array<T>,
|
||||
mapper: (item: T) => Array<any>,
|
||||
): Array<any> {
|
||||
return Array.prototype.concat.apply([], array.map(mapper));
|
||||
}
|
||||
|
||||
function annotations(
|
||||
error: GraphQLError,
|
||||
severity: number,
|
||||
type: string,
|
||||
): Array<Diagnostic> {
|
||||
if (!error.nodes) {
|
||||
return [];
|
||||
}
|
||||
return error.nodes.map(node => {
|
||||
const highlightNode =
|
||||
node.kind !== 'Variable' && node.name
|
||||
? node.name
|
||||
: node.variable
|
||||
? node.variable
|
||||
: node;
|
||||
|
||||
invariant(error.locations, 'GraphQL validation error requires locations.');
|
||||
const loc = error.locations[0];
|
||||
const highlightLoc = getLocation(highlightNode);
|
||||
const end = loc.column + (highlightLoc.end - highlightLoc.start);
|
||||
return {
|
||||
source: `GraphQL: ${type}`,
|
||||
message: error.message,
|
||||
severity,
|
||||
range: new Range(
|
||||
new Position(loc.line - 1, loc.column - 1),
|
||||
new Position(loc.line - 1, end),
|
||||
),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
export function getRange(location: SourceLocation, queryText: string) {
|
||||
const parser = onlineParser();
|
||||
const state = parser.startState();
|
||||
const lines = queryText.split('\n');
|
||||
|
||||
invariant(
|
||||
lines.length >= location.line,
|
||||
'Query text must have more lines than where the error happened',
|
||||
);
|
||||
|
||||
let stream = null;
|
||||
|
||||
for (let i = 0; i < location.line; i++) {
|
||||
stream = new CharacterStream(lines[i]);
|
||||
while (!stream.eol()) {
|
||||
const style = parser.token(stream, state);
|
||||
if (style === 'invalidchar') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
invariant(stream, 'Expected Parser stream to be available.');
|
||||
|
||||
const line = location.line - 1;
|
||||
const start = stream.getStartOfToken();
|
||||
const end = stream.getCurrentPosition();
|
||||
|
||||
return new Range(new Position(line, start), new Position(line, end));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get location info from a node in a type-safe way.
|
||||
*
|
||||
* The only way a node could not have a location is if we initialized the parser
|
||||
* (and therefore the lexer) with the `noLocation` option, but we always
|
||||
* call `parse` without options above.
|
||||
*/
|
||||
function getLocation(node: any): Location {
|
||||
const typeCastedNode = (node: ASTNode);
|
||||
const location = typeCastedNode.loc;
|
||||
invariant(location, 'Expected ASTNode to have a location.');
|
||||
return location;
|
||||
}
|
@ -1,186 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) Facebook, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under the license found in the
|
||||
* LICENSE file in the root directory of this source tree.
|
||||
*
|
||||
* @flow
|
||||
*/
|
||||
|
||||
/**
|
||||
* Ported from codemirror-graphql
|
||||
* https://github.com/graphql/codemirror-graphql/blob/master/src/info.js
|
||||
*/
|
||||
|
||||
import type {GraphQLSchema} from 'graphql';
|
||||
import type {ContextToken} from 'graphql-language-service-types';
|
||||
import type {Hover} from 'vscode-languageserver-types';
|
||||
import type {Position} from 'graphql-language-service-utils';
|
||||
import {getTokenAtPosition, getTypeInfo} from './getAutocompleteSuggestions';
|
||||
import {GraphQLNonNull, GraphQLList} from 'graphql';
|
||||
|
||||
export function getHoverInformation(
|
||||
schema: GraphQLSchema,
|
||||
queryText: string,
|
||||
cursor: Position,
|
||||
contextToken?: ContextToken,
|
||||
): Hover.contents {
|
||||
const token = contextToken || getTokenAtPosition(queryText, cursor);
|
||||
|
||||
if (!schema || !token || !token.state) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const state = token.state;
|
||||
const kind = state.kind;
|
||||
const step = state.step;
|
||||
const typeInfo = getTypeInfo(schema, token.state);
|
||||
const options = {schema};
|
||||
|
||||
// Given a Schema and a Token, produce the contents of an info tooltip.
|
||||
// To do this, create a div element that we will render "into" and then pass
|
||||
// it to various rendering functions.
|
||||
if (
|
||||
(kind === 'Field' && step === 0 && typeInfo.fieldDef) ||
|
||||
(kind === 'AliasedField' && step === 2 && typeInfo.fieldDef)
|
||||
) {
|
||||
const into = [];
|
||||
renderField(into, typeInfo, options);
|
||||
renderDescription(into, options, typeInfo.fieldDef);
|
||||
return into.join('').trim();
|
||||
} else if (kind === 'Directive' && step === 1 && typeInfo.directiveDef) {
|
||||
const into = [];
|
||||
renderDirective(into, typeInfo, options);
|
||||
renderDescription(into, options, typeInfo.directiveDef);
|
||||
return into.join('').trim();
|
||||
} else if (kind === 'Argument' && step === 0 && typeInfo.argDef) {
|
||||
const into = [];
|
||||
renderArg(into, typeInfo, options);
|
||||
renderDescription(into, options, typeInfo.argDef);
|
||||
return into.join('').trim();
|
||||
} else if (
|
||||
kind === 'EnumValue' &&
|
||||
typeInfo.enumValue &&
|
||||
typeInfo.enumValue.description
|
||||
) {
|
||||
const into = [];
|
||||
renderEnumValue(into, typeInfo, options);
|
||||
renderDescription(into, options, typeInfo.enumValue);
|
||||
return into.join('').trim();
|
||||
} else if (
|
||||
kind === 'NamedType' &&
|
||||
typeInfo.type &&
|
||||
typeInfo.type.description
|
||||
) {
|
||||
const into = [];
|
||||
renderType(into, typeInfo, options, typeInfo.type);
|
||||
renderDescription(into, options, typeInfo.type);
|
||||
return into.join('').trim();
|
||||
}
|
||||
}
|
||||
|
||||
function renderField(into, typeInfo, options) {
|
||||
renderQualifiedField(into, typeInfo, options);
|
||||
renderTypeAnnotation(into, typeInfo, options, typeInfo.type);
|
||||
}
|
||||
|
||||
function renderQualifiedField(into, typeInfo, options) {
|
||||
if (!typeInfo.fieldDef) {
|
||||
return;
|
||||
}
|
||||
const fieldName = (typeInfo.fieldDef.name: string);
|
||||
if (fieldName.slice(0, 2) !== '__') {
|
||||
renderType(into, typeInfo, options, typeInfo.parentType);
|
||||
text(into, '.');
|
||||
}
|
||||
text(into, fieldName);
|
||||
}
|
||||
|
||||
function renderDirective(into, typeInfo, options) {
|
||||
if (!typeInfo.directiveDef) {
|
||||
return;
|
||||
}
|
||||
const name = '@' + typeInfo.directiveDef.name;
|
||||
text(into, name);
|
||||
}
|
||||
|
||||
function renderArg(into, typeInfo, options) {
|
||||
if (typeInfo.directiveDef) {
|
||||
renderDirective(into, typeInfo, options);
|
||||
} else if (typeInfo.fieldDef) {
|
||||
renderQualifiedField(into, typeInfo, options);
|
||||
}
|
||||
|
||||
if (!typeInfo.argDef) {
|
||||
return;
|
||||
}
|
||||
|
||||
const name = typeInfo.argDef.name;
|
||||
text(into, '(');
|
||||
text(into, name);
|
||||
renderTypeAnnotation(into, typeInfo, options, typeInfo.inputType);
|
||||
text(into, ')');
|
||||
}
|
||||
|
||||
function renderTypeAnnotation(into, typeInfo, options, t) {
|
||||
text(into, ': ');
|
||||
renderType(into, typeInfo, options, t);
|
||||
}
|
||||
|
||||
function renderEnumValue(into, typeInfo, options) {
|
||||
if (!typeInfo.enumValue) {
|
||||
return;
|
||||
}
|
||||
const name = typeInfo.enumValue.name;
|
||||
renderType(into, typeInfo, options, typeInfo.inputType);
|
||||
text(into, '.');
|
||||
text(into, name);
|
||||
}
|
||||
|
||||
function renderType(into, typeInfo, options, t) {
|
||||
if (!t) {
|
||||
return;
|
||||
}
|
||||
if (t instanceof GraphQLNonNull) {
|
||||
renderType(into, typeInfo, options, t.ofType);
|
||||
text(into, '!');
|
||||
} else if (t instanceof GraphQLList) {
|
||||
text(into, '[');
|
||||
renderType(into, typeInfo, options, t.ofType);
|
||||
text(into, ']');
|
||||
} else {
|
||||
text(into, t.name);
|
||||
}
|
||||
}
|
||||
|
||||
function renderDescription(into, options, def) {
|
||||
if (!def) {
|
||||
return;
|
||||
}
|
||||
const description =
|
||||
typeof def.description === 'string' ? def.description : null;
|
||||
if (description) {
|
||||
text(into, '\n\n');
|
||||
text(into, description);
|
||||
}
|
||||
renderDeprecation(into, options, def);
|
||||
}
|
||||
|
||||
function renderDeprecation(into, options, def) {
|
||||
if (!def) {
|
||||
return;
|
||||
}
|
||||
const reason =
|
||||
typeof def.deprecationReason === 'string' ? def.deprecationReason : null;
|
||||
if (!reason) {
|
||||
return;
|
||||
}
|
||||
text(into, '\n\n');
|
||||
text(into, 'Deprecated: ');
|
||||
text(into, reason);
|
||||
}
|
||||
|
||||
function text(into: string[], content: string) {
|
||||
into.push(content);
|
||||
}
|
@ -1,121 +0,0 @@
/**
 * Copyright (c) Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the license found in the
 * LICENSE file in the root directory of this source tree.
 *
 * @flow
 */

import type {
  Outline,
  TextToken,
  TokenKind,
} from 'graphql-language-service-types';

import {Kind, parse, visit} from 'graphql';
import {offsetToPosition} from 'graphql-language-service-utils';

const {INLINE_FRAGMENT} = Kind;

const OUTLINEABLE_KINDS = {
  Field: true,
  OperationDefinition: true,
  Document: true,
  SelectionSet: true,
  Name: true,
  FragmentDefinition: true,
  FragmentSpread: true,
  InlineFragment: true,
};

type OutlineTreeConverterType = {[name: string]: Function};

export function getOutline(queryText: string): ?Outline {
  let ast;
  try {
    ast = parse(queryText);
  } catch (error) {
    return null;
  }

  const visitorFns = outlineTreeConverter(queryText);
  const outlineTrees = visit(ast, {
    leave(node) {
      if (
        OUTLINEABLE_KINDS.hasOwnProperty(node.kind) &&
        visitorFns[node.kind]
      ) {
        return visitorFns[node.kind](node);
      }
      return null;
    },
  });
  return {outlineTrees};
}

function outlineTreeConverter(docText: string): OutlineTreeConverterType {
  const meta = node => ({
    representativeName: node.name,
    startPosition: offsetToPosition(docText, node.loc.start),
    endPosition: offsetToPosition(docText, node.loc.end),
    children: node.selectionSet || [],
  });
  return {
    Field: node => {
      const tokenizedText = node.alias
        ? [buildToken('plain', node.alias), buildToken('plain', ': ')]
        : [];
      tokenizedText.push(buildToken('plain', node.name));
      return {tokenizedText, ...meta(node)};
    },
    OperationDefinition: node => ({
      tokenizedText: [
        buildToken('keyword', node.operation),
        buildToken('whitespace', ' '),
        buildToken('class-name', node.name),
      ],
      ...meta(node),
    }),
    Document: node => node.definitions,
    SelectionSet: node =>
      concatMap(node.selections, child => {
        return child.kind === INLINE_FRAGMENT ? child.selectionSet : child;
      }),
    Name: node => node.value,
    FragmentDefinition: node => ({
      tokenizedText: [
        buildToken('keyword', 'fragment'),
        buildToken('whitespace', ' '),
        buildToken('class-name', node.name),
      ],
      ...meta(node),
    }),
    FragmentSpread: node => ({
      tokenizedText: [
        buildToken('plain', '...'),
        buildToken('class-name', node.name),
      ],
      ...meta(node),
    }),
    InlineFragment: node => node.selectionSet,
  };
}

function buildToken(kind: TokenKind, value: string): TextToken {
  return {kind, value};
}

function concatMap(arr: Array<any>, fn: Function): Array<any> {
  const res = [];
  for (let i = 0; i < arr.length; i++) {
    const x = fn(arr[i], i);
    if (Array.isArray(x)) {
      res.push(...x);
    } else {
      res.push(x);
    }
  }
  return res;
}
@ -1,31 +0,0 @@
/**
 * Copyright (c) Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the license found in the
 * LICENSE file in the root directory of this source tree.
 *
 * @flow
 */

export {
  getDefinitionState,
  getFieldDef,
  forEachState,
  objectValues,
  hintList,
} from './autocompleteUtils';

export {getAutocompleteSuggestions} from './getAutocompleteSuggestions';

export {
  LANGUAGE,
  getDefinitionQueryResultForFragmentSpread,
  getDefinitionQueryResultForDefinitionNode,
} from './getDefinition';

export {getDiagnostics, validateQuery} from './getDiagnostics';
export {getOutline} from './getOutline';
export {getHoverInformation} from './getHoverInformation';

export {GraphQLLanguageService} from './GraphQLLanguageService';
@ -1,7 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 841.9 595.3">
<g fill="#61DAFB">
<path d="M666.3 296.5c0-32.5-40.7-63.3-103.1-82.4 14.4-63.6 8-114.2-20.2-130.4-6.5-3.8-14.1-5.6-22.4-5.6v22.3c4.6 0 8.3.9 11.4 2.6 13.6 7.8 19.5 37.5 14.9 75.7-1.1 9.4-2.9 19.3-5.1 29.4-19.6-4.8-41-8.5-63.5-10.9-13.5-18.5-27.5-35.3-41.6-50 32.6-30.3 63.2-46.9 84-46.9V78c-27.5 0-63.5 19.6-99.9 53.6-36.4-33.8-72.4-53.2-99.9-53.2v22.3c20.7 0 51.4 16.5 84 46.6-14 14.7-28 31.4-41.3 49.9-22.6 2.4-44 6.1-63.6 11-2.3-10-4-19.7-5.2-29-4.7-38.2 1.1-67.9 14.6-75.8 3-1.8 6.9-2.6 11.5-2.6V78.5c-8.4 0-16 1.8-22.6 5.6-28.1 16.2-34.4 66.7-19.9 130.1-62.2 19.2-102.7 49.9-102.7 82.3 0 32.5 40.7 63.3 103.1 82.4-14.4 63.6-8 114.2 20.2 130.4 6.5 3.8 14.1 5.6 22.5 5.6 27.5 0 63.5-19.6 99.9-53.6 36.4 33.8 72.4 53.2 99.9 53.2 8.4 0 16-1.8 22.6-5.6 28.1-16.2 34.4-66.7 19.9-130.1 62-19.1 102.5-49.9 102.5-82.3zm-130.2-66.7c-3.7 12.9-8.3 26.2-13.5 39.5-4.1-8-8.4-16-13.1-24-4.6-8-9.5-15.8-14.4-23.4 14.2 2.1 27.9 4.7 41 7.9zm-45.8 106.5c-7.8 13.5-15.8 26.3-24.1 38.2-14.9 1.3-30 2-45.2 2-15.1 0-30.2-.7-45-1.9-8.3-11.9-16.4-24.6-24.2-38-7.6-13.1-14.5-26.4-20.8-39.8 6.2-13.4 13.2-26.8 20.7-39.9 7.8-13.5 15.8-26.3 24.1-38.2 14.9-1.3 30-2 45.2-2 15.1 0 30.2.7 45 1.9 8.3 11.9 16.4 24.6 24.2 38 7.6 13.1 14.5 26.4 20.8 39.8-6.3 13.4-13.2 26.8-20.7 39.9zm32.3-13c5.4 13.4 10 26.8 13.8 39.8-13.1 3.2-26.9 5.9-41.2 8 4.9-7.7 9.8-15.6 14.4-23.7 4.6-8 8.9-16.1 13-24.1zM421.2 430c-9.3-9.6-18.6-20.3-27.8-32 9 .4 18.2.7 27.5.7 9.4 0 18.7-.2 27.8-.7-9 11.7-18.3 22.4-27.5 32zm-74.4-58.9c-14.2-2.1-27.9-4.7-41-7.9 3.7-12.9 8.3-26.2 13.5-39.5 4.1 8 8.4 16 13.1 24 4.7 8 9.5 15.8 14.4 23.4zM420.7 163c9.3 9.6 18.6 20.3 27.8 32-9-.4-18.2-.7-27.5-.7-9.4 0-18.7.2-27.8.7 9-11.7 18.3-22.4 27.5-32zm-74 58.9c-4.9 7.7-9.8 15.6-14.4 23.7-4.6 8-8.9 16-13 24-5.4-13.4-10-26.8-13.8-39.8 13.1-3.1 26.9-5.8 41.2-7.9zm-90.5 125.2c-35.4-15.1-58.3-34.9-58.3-50.6 0-15.7 22.9-35.6 58.3-50.6 8.6-3.7 18-7 27.7-10.1 5.7 19.6 13.2 40 22.5 60.9-9.2 20.8-16.6 41.1-22.2 60.6-9.9-3.1-19.3-6.5-28-10.2zM310 490c-13.6-7.8-19.5-37.5-14.9-75.7 1.1-9.4 2.9-19.3 5.1-29.4 19.6 4.8 41 8.5 63.5 10.9 13.5 18.5 27.5 35.3 41.6 50-32.6 30.3-63.2 46.9-84 46.9-4.5-.1-8.3-1-11.3-2.7zm237.2-76.2c4.7 38.2-1.1 67.9-14.6 75.8-3 1.8-6.9 2.6-11.5 2.6-20.7 0-51.4-16.5-84-46.6 14-14.7 28-31.4 41.3-49.9 22.6-2.4 44-6.1 63.6-11 2.3 10.1 4.1 19.8 5.2 29.1zm38.5-66.7c-8.6 3.7-18 7-27.7 10.1-5.7-19.6-13.2-40-22.5-60.9 9.2-20.8 16.6-41.1 22.2-60.6 9.9 3.1 19.3 6.5 28.1 10.2 35.4 15.1 58.3 34.9 58.3 50.6-.1 15.7-23 35.6-58.4 50.6zM320.8 78.4z"/>
|
||||
<circle cx="420.9" cy="296.5" r="45.7"/>
|
||||
<path d="M520.5 78.1z"/>
|
||||
</g>
|
||||
</svg>
|
Before: Size 2.6 KiB

755 config/allow.list Normal file
@ -0,0 +1,755 @@
|
||||
# http://localhost:8080/
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Protect Ya Neck",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Enter the Wu-Tang",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "Wu-Tang",
|
||||
"description": "No description needed"
|
||||
},
|
||||
"product_id": 1
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: $product_id, update: $update) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
users {
|
||||
id
|
||||
email
|
||||
picture: avatar
|
||||
products(limit: 2, where: {price: {gt: 10}}) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Gumbo1",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Gumbo2",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: 199, delete: true) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
products {
|
||||
id
|
||||
name
|
||||
user {
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"product_id": 5
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: $product_id, delete: true) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
products {
|
||||
id
|
||||
name
|
||||
price
|
||||
users {
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "gfk@myspace.com",
|
||||
"full_name": "Ghostface Killah",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "Helloo",
|
||||
"description": "World \u003c\u003e"
|
||||
},
|
||||
"user": 123
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: 5, update: $update) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "WOOO",
|
||||
"price": 50.5
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
query getProducts {
|
||||
products {
|
||||
id
|
||||
name
|
||||
price
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
deals {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"beer": "smoke"
|
||||
}
|
||||
|
||||
query beerSearch {
|
||||
products(search: $beer) {
|
||||
id
|
||||
name
|
||||
search_rank
|
||||
search_headline_description
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "goo1@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "goo12@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": [
|
||||
{
|
||||
"name": "Banana 1",
|
||||
"price": 1.1,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Banana 2",
|
||||
"price": 2.2,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
products {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Banana 3",
|
||||
"price": 1.1,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "a2@a.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
price
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name",
|
||||
"description": "my_desc"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(id: 15, update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name",
|
||||
"description": "my_desc"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name 2",
|
||||
"description": "my_desc 2"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"sale_type": "tuutuu",
|
||||
"quantity": 5,
|
||||
"due_date": "now",
|
||||
"customer": {
|
||||
"email": "thedude1@rug.com",
|
||||
"full_name": "The Dude"
|
||||
},
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
purchase(update: $data, id: 5) {
|
||||
sale_type
|
||||
quantity
|
||||
due_date
|
||||
customer {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"where": {
|
||||
"id": 2
|
||||
},
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(update: $data, where: {id: {eq: 8}}) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"where": {
|
||||
"id": 2
|
||||
},
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user(where: {id: {eq: 8}}) {
|
||||
id
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "thedude@rug.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user {
|
||||
email
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "booboo@demo.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 6) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "booboo@demo.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
product(id: 6) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude123@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"connect": {
|
||||
"id": 7
|
||||
},
|
||||
"disconnect": {
|
||||
"id": 8
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(update: $data, id: 6) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5,
|
||||
"email": "test@test.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thed44ude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 6
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Coconut",
|
||||
"price": 2.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 3
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Coconut",
|
||||
"price": 2.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5,
|
||||
"email": "test@test.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 2) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
243 config/dev.yml Normal file
@ -0,0 +1,243 @@
|
||||
app_name: "Super Graph Development"
|
||||
host_port: 0.0.0.0:8080
|
||||
web_ui: true
|
||||
|
||||
# debug, error, warn, info, none
|
||||
log_level: "debug"
|
||||
|
||||
# enable or disable http compression (uses gzip)
|
||||
http_compress: true
|
||||
|
||||
# When production mode is 'true' only queries
|
||||
# from the allow list are permitted.
|
||||
# When it's 'false' all queries are saved to the
|
||||
# allow list in ./config/allow.list
|
||||
production: false
|
||||
|
||||
# Throw a 401 on auth failure for queries that need auth
|
||||
auth_fail_block: false
|
||||
|
||||
# Latency tracing for database queries and remote joins
|
||||
# the resulting latency information is returned with the
|
||||
# response
|
||||
enable_tracing: true
|
||||
|
||||
# Watch the config folder and reload Super Graph
|
||||
# with the new configs when a change is detected
|
||||
reload_on_config_change: true
|
||||
|
||||
# File that points to the database seeding script
|
||||
# seed_file: seed.js
|
||||
|
||||
# Path pointing to where the migrations can be found
|
||||
migrations_path: ./migrations
|
||||
|
||||
# Secret key for general encryption operations like
|
||||
# encrypting the cursor data
|
||||
secret_key: supercalifajalistics
|
||||
|
||||
# CORS: A list of origins a cross-domain request can be executed from.
|
||||
# If the special * value is present in the list, all origins will be allowed.
|
||||
# An origin may contain a wildcard (*) to replace 0 or more
|
||||
# characters (e.g. http://*.domain.com).
|
||||
cors_allowed_origins: ["*"]
|
||||
|
||||
# Debug Cross Origin Resource Sharing requests
|
||||
cors_debug: true
|
||||
|
||||
# Default API path prefix is /api; you can change it if you like
|
||||
# api_path: "/data"
|
||||
|
||||
# Cache-Control header can help cache queries if your CDN supports cache-control
|
||||
# on POST requests (does not work with mutations)
|
||||
# cache_control: "public, max-age=300, s-maxage=600"
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
# SG_DATABASE_PORT
|
||||
# SG_DATABASE_USER
|
||||
# SG_DATABASE_PASSWORD
|
||||
|
||||
# Auth related environment Variables
|
||||
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
|
||||
# SG_AUTH_RAILS_REDIS_URL
|
||||
# SG_AUTH_RAILS_REDIS_PASSWORD
|
||||
# SG_AUTH_JWT_PUBLIC_KEY_FILE
|
||||
|
||||
# inflections:
|
||||
# person: people
|
||||
# sheep: sheep
|
||||
|
||||
# Enable OpenCensus tracing and metrics
|
||||
# telemetry:
|
||||
# debug: true
|
||||
# metrics:
|
||||
# exporter: "prometheus"
|
||||
# tracing:
|
||||
# exporter: "zipkin"
|
||||
# endpoint: "http://zipkin:9411/api/v2/spans"
|
||||
# sample: 0.2
|
||||
# include_query: false
|
||||
# include_params: false
|
||||
|
||||
auth:
|
||||
# Can be 'rails' or 'jwt'
|
||||
type: rails
|
||||
cookie: _app_session
|
||||
|
||||
# Comment this out if you want to disable setting
|
||||
# the user_id via a header for testing.
|
||||
# Disable in production
|
||||
creds_in_header: true
|
||||
|
||||
rails:
|
||||
# Rails version this is used for reading the
|
||||
# various cookie formats.
|
||||
version: 5.2
|
||||
|
||||
# Found in 'Rails.application.config.secret_key_base'
|
||||
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
|
||||
|
||||
# Remote cookie store. (memcache or redis)
|
||||
# url: redis://redis:6379
|
||||
# password: ""
|
||||
# max_idle: 80
|
||||
# max_active: 12000
|
||||
# In most cases you don't need these
|
||||
# salt: "encrypted cookie"
|
||||
# sign_salt: "signed encrypted cookie"
|
||||
# auth_salt: "authenticated encrypted cookie"
|
||||
|
||||
# jwt:
|
||||
# provider: auth0
|
||||
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
|
||||
# public_key_file: /secrets/public_key.pem
|
||||
# public_key_type: ecdsa #rsa
|
||||
|
||||
database:
|
||||
type: postgres
|
||||
host: db
|
||||
port: 5432
|
||||
dbname: app_development
|
||||
user: postgres
|
||||
password: postgres
|
||||
|
||||
#schema: "public"
|
||||
#pool_size: 10
|
||||
#max_retries: 0
|
||||
#log_level: "debug"
|
||||
|
||||
# Set session variable "user.id" to the user id
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 1m
|
||||
|
||||
# Define additional variables here to be used with filters
|
||||
variables:
|
||||
admin_account_id: "5"
|
||||
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
- password
|
||||
- encrypted
|
||||
- token
|
||||
|
||||
tables:
|
||||
- name: customers
|
||||
remotes:
|
||||
- name: payments
|
||||
id: stripe_id
|
||||
url: http://rails_app:3000/stripe/$id
|
||||
path: data
|
||||
# debug: true
|
||||
pass_headers:
|
||||
- cookie
|
||||
set_headers:
|
||||
- name: Host
|
||||
value: 0.0.0.0
|
||||
# - name: Authorization
|
||||
# value: Bearer <stripe_api_key>
|
||||
|
||||
- # You can create new fields that have a
|
||||
# real db table backing them
|
||||
name: me
|
||||
table: users
|
||||
|
||||
- name: deals
|
||||
table: products
|
||||
|
||||
- name: users
|
||||
columns:
|
||||
- name: email
|
||||
related_to: products.name
|
||||
|
||||
roles_query: "SELECT * FROM users WHERE id = $user_id"
|
||||
|
||||
roles:
|
||||
- name: anon
|
||||
tables:
|
||||
- name: products
|
||||
query:
|
||||
limit: 10
|
||||
columns: ["id", "name", "description"]
|
||||
aggregation: false
|
||||
|
||||
insert:
|
||||
block: false
|
||||
|
||||
update:
|
||||
block: false
|
||||
|
||||
delete:
|
||||
block: false
|
||||
|
||||
- name: deals
|
||||
query:
|
||||
limit: 3
|
||||
aggregation: false
|
||||
|
||||
- name: purchases
|
||||
query:
|
||||
limit: 3
|
||||
aggregation: false
|
||||
|
||||
- name: user
|
||||
tables:
|
||||
- name: users
|
||||
query:
|
||||
filters: ["{ id: { _eq: $user_id } }"]
|
||||
|
||||
- name: products
|
||||
query:
|
||||
limit: 50
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
disable_functions: false
|
||||
|
||||
insert:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
presets:
|
||||
- user_id: "$user_id"
|
||||
- created_at: "now"
|
||||
- updated_at: "now"
|
||||
|
||||
update:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
presets:
|
||||
- updated_at: "now"
|
||||
|
||||
delete:
|
||||
block: true
|
||||
|
||||
- name: admin
|
||||
match: id = 1000
|
||||
tables:
|
||||
- name: users
|
||||
filters: []
|
76 config/prod.yml Normal file
@ -0,0 +1,76 @@
|
||||
# Inherit config from this other config file
|
||||
# so I only need to overwrite some values
|
||||
inherits: dev
|
||||
|
||||
app_name: "Super Graph Production"
|
||||
host_port: 0.0.0.0:8080
|
||||
web_ui: false
|
||||
|
||||
# debug, error, warn, info, none
|
||||
log_level: "info"
|
||||
|
||||
# enable or disable http compression (uses gzip)
|
||||
http_compress: true
|
||||
|
||||
# When production mode is 'true' only queries
|
||||
# from the allow list are permitted.
|
||||
# When it's 'false' all queries are saved to the
|
||||
# allow list in ./config/allow.list
|
||||
production: true
|
||||
|
||||
# Throw a 401 on auth failure for queries that need auth
|
||||
auth_fail_block: true
|
||||
|
||||
# Latency tracing for database queries and remote joins
|
||||
# the resulting latency information is returned with the
|
||||
# response
|
||||
enable_tracing: true
|
||||
|
||||
# File that points to the database seeding script
|
||||
# seed_file: seed.js
|
||||
|
||||
# Path pointing to where the migrations can be found
|
||||
# migrations_path: ./migrations
|
||||
|
||||
# Secret key for general encryption operations like
|
||||
# encrypting the cursor data
|
||||
# secret_key: supercalifajalistics
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
# SG_DATABASE_PORT
|
||||
# SG_DATABASE_USER
|
||||
# SG_DATABASE_PASSWORD
|
||||
|
||||
# Auth related environment Variables
|
||||
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
|
||||
# SG_AUTH_RAILS_REDIS_URL
|
||||
# SG_AUTH_RAILS_REDIS_PASSWORD
|
||||
# SG_AUTH_JWT_PUBLIC_KEY_FILE
|
||||
|
||||
database:
|
||||
type: postgres
|
||||
host: db
|
||||
port: 5432
|
||||
dbname: app_production
|
||||
user: postgres
|
||||
password: postgres
|
||||
#pool_size: 10
|
||||
#max_retries: 0
|
||||
#log_level: "debug"
|
||||
|
||||
# Set session variable "user.id" to the user id
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 5m
|
||||
# Enable OpenCensus tracing and metrics
|
||||
# telemetry:
|
||||
# debug: false
|
||||
# metrics:
|
||||
# exporter: "prometheus"
|
||||
# tracing:
|
||||
# exporter: "zipkin"
|
||||
# endpoint: "http://zipkin:9411/api/v2/spans"
|
||||
# sample: 0.6
|
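A minimal sketch of loading these configs from Go, using the ReadInConfig and NewSuperGraph functions that appear later in this diff. The pgx driver import path is an assumption, and the connection values mirror the database block in config/dev.yml:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib" // assumed driver import; registers the "pgx" database/sql driver
)

func main() {
	// ReadInConfig loads the file for the current environment and follows
	// the `inherits: dev` directive used by config/prod.yml.
	conf, err := core.ReadInConfig("./config/dev.yml")
	if err != nil {
		log.Fatal(err)
	}

	// Connection values below mirror the database block in config/dev.yml.
	db, err := sql.Open("pgx", "postgres://postgres:postgres@db:5432/app_development")
	if err != nil {
		log.Fatal(err)
	}

	if _, err := core.NewSuperGraph(conf, db); err != nil {
		log.Fatal(err)
	}
}
```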
116 config/seed.js Normal file
@ -0,0 +1,116 @@
|
||||
var user_count = 10
|
||||
customer_count = 100
|
||||
product_count = 50
|
||||
purchase_count = 100
|
||||
|
||||
var users = []
|
||||
customers = []
|
||||
products = []
|
||||
|
||||
for (i = 0; i < user_count; i++) {
|
||||
var pwd = fake.password()
|
||||
var data = {
|
||||
full_name: fake.name(),
|
||||
avatar: fake.avatar_url(200),
|
||||
phone: fake.phone(),
|
||||
email: fake.email(),
|
||||
password: pwd,
|
||||
password_confirmation: pwd,
|
||||
created_at: "now",
|
||||
updated_at: "now"
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
mutation { \
|
||||
user(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data })
|
||||
|
||||
users.push(res.user)
|
||||
}
|
||||
|
||||
for (i = 0; i < product_count; i++) {
|
||||
var n = Math.floor(Math.random() * users.length)
|
||||
var user = users[n]
|
||||
|
||||
var desc = [
|
||||
fake.beer_style(),
|
||||
fake.beer_hop(),
|
||||
fake.beer_yeast(),
|
||||
fake.beer_ibu(),
|
||||
fake.beer_alcohol(),
|
||||
fake.beer_blg(),
|
||||
].join(", ")
|
||||
|
||||
var data = {
|
||||
name: fake.beer_name(),
|
||||
description: desc,
|
||||
price: fake.price()
|
||||
//user_id: user.id,
|
||||
//created_at: "now",
|
||||
//updated_at: "now"
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
mutation { \
|
||||
product(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data }, {
|
||||
user_id: 5
|
||||
})
|
||||
products.push(res.product)
|
||||
}
|
||||
|
||||
for (i = 0; i < customer_count; i++) {
|
||||
var pwd = fake.password()
|
||||
|
||||
var data = {
|
||||
stripe_id: "CUS-" + fake.uuid(),
|
||||
full_name: fake.name(),
|
||||
phone: fake.phone(),
|
||||
email: fake.email(),
|
||||
password: pwd,
|
||||
password_confirmation: pwd,
|
||||
created_at: "now",
|
||||
updated_at: "now"
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
mutation { \
|
||||
customer(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data })
|
||||
customers.push(res.customer)
|
||||
}
|
||||
|
||||
for (i = 0; i < purchase_count; i++) {
|
||||
var sale_type = fake.rand_string(["rented", "bought"])
|
||||
|
||||
if (sale_type === "rented") {
|
||||
var due_date = fake.date()
|
||||
var returned = fake.date()
|
||||
}
|
||||
|
||||
var data = {
|
||||
customer_id: customers[Math.floor(Math.random() * customer_count)].id,
|
||||
product_id: products[Math.floor(Math.random() * product_count)].id,
|
||||
sale_type: sale_type,
|
||||
quantity: Math.floor(Math.random() * 10),
|
||||
due_date: due_date,
|
||||
returned: returned,
|
||||
created_at: "now",
|
||||
updated_at: "now"
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
mutation { \
|
||||
purchase(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data })
|
||||
|
||||
console.log(res)
|
||||
}
|
84 core/api.go
@ -16,17 +16,12 @@
|
||||
func main() {
|
||||
db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
conf, err := core.ReadInConfig("./config/dev.yml")
|
||||
sg, err := core.NewSuperGraph(nil, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
}
|
||||
|
||||
sg, err = core.NewSuperGraph(conf, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
query := `
|
||||
@ -37,9 +32,11 @@
|
||||
}
|
||||
}`
|
||||
|
||||
res, err := sg.GraphQL(context.Background(), query, nil)
|
||||
ctx = context.WithValue(ctx, core.UserIDKey, 1)
|
||||
|
||||
res, err := sg.GraphQL(ctx, query, nil)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(res.Data))
|
||||
@ -52,9 +49,11 @@ import (
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"hash/maphash"
|
||||
_log "log"
|
||||
"os"
|
||||
|
||||
"github.com/chirino/graphql"
|
||||
"github.com/dosco/super-graph/core/internal/allow"
|
||||
"github.com/dosco/super-graph/core/internal/crypto"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
@ -81,25 +80,40 @@ type SuperGraph struct {
|
||||
conf *Config
|
||||
db *sql.DB
|
||||
log *_log.Logger
|
||||
dbinfo *psql.DBInfo
|
||||
schema *psql.DBSchema
|
||||
allowList *allow.List
|
||||
encKey [32]byte
|
||||
prepared map[string]*preparedItem
|
||||
hashSeed maphash.Seed
|
||||
queries map[uint64]*query
|
||||
roles map[string]*Role
|
||||
getRole *sql.Stmt
|
||||
rmap map[uint64]*resolvFn
|
||||
abacEnabled bool
|
||||
anonExists bool
|
||||
qc *qcode.Compiler
|
||||
pc *psql.Compiler
|
||||
ge *graphql.Engine
|
||||
}
|
||||
|
||||
// NewSuperGraph creates the SuperGraph struct; this involves querying the database to learn its
|
||||
// schemas and relationships
|
||||
func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
|
||||
return newSuperGraph(conf, db, nil)
|
||||
}
|
||||
|
||||
// newSuperGraph helps with writing tests and benchmarks
|
||||
func newSuperGraph(conf *Config, db *sql.DB, dbinfo *psql.DBInfo) (*SuperGraph, error) {
|
||||
if conf == nil {
|
||||
conf = &Config{}
|
||||
}
|
||||
|
||||
sg := &SuperGraph{
|
||||
conf: conf,
|
||||
db: db,
|
||||
dbinfo: dbinfo,
|
||||
log: _log.New(os.Stdout, "", 0),
|
||||
hashSeed: maphash.MakeSeed(),
|
||||
}
|
||||
|
||||
if err := sg.initConfig(); err != nil {
|
||||
@ -118,6 +132,14 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := sg.initResolvers(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := sg.initGraphQLEgine(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(conf.SecretKey) != 0 {
|
||||
sk := sha256.Sum256([]byte(conf.SecretKey))
|
||||
conf.SecretKey = ""
|
||||
@ -149,7 +171,24 @@ type Result struct {
|
||||
// In developer mode all named queries are saved into a file `allow.list` and in production mode only
|
||||
// queries from this file can be run.
|
||||
func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMessage) (*Result, error) {
|
||||
ct := scontext{Context: c, sg: sg, query: query, vars: vars}
|
||||
var res Result
|
||||
|
||||
res.op = qcode.GetQType(query)
|
||||
res.name = allow.QueryName(query)
|
||||
|
||||
// use the chirino/graphql library for introspection queries
|
||||
// disabled when allow list is enforced
|
||||
if !sg.conf.UseAllowList && res.name == "IntrospectionQuery" {
|
||||
r := sg.ge.ServeGraphQL(&graphql.Request{Query: query})
|
||||
res.Data = r.Data
|
||||
|
||||
if r.Error() != nil {
|
||||
res.Error = r.Error().Error()
|
||||
}
|
||||
return &res, r.Error()
|
||||
}
|
||||
|
||||
ct := scontext{Context: c, sg: sg, query: query, vars: vars, res: res}
|
||||
|
||||
if len(vars) <= 2 {
|
||||
ct.vars = nil
|
||||
@ -161,9 +200,6 @@ func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMess
|
||||
ct.role = "anon"
|
||||
}
|
||||
|
||||
ct.res.op = qcode.GetQType(query)
|
||||
ct.res.name = allow.QueryName(query)
|
||||
|
||||
data, err := ct.execQuery()
|
||||
if err != nil {
|
||||
return &ct.res, err
|
||||
@ -173,3 +209,21 @@ func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMess
|
||||
|
||||
return &ct.res, nil
|
||||
}
|
||||
|
||||
// GraphQLSchema function returns the GraphQL schema for the underlying database connected
|
||||
// to this instance of Super Graph
|
||||
func (sg *SuperGraph) GraphQLSchema() (string, error) {
|
||||
return sg.ge.Schema.String(), nil
|
||||
}
|
||||
|
||||
// Operation function returns the operation type from the query. It uses a very fast algorithm to
|
||||
// extract the operation without having to parse the query.
|
||||
func Operation(query string) OpType {
|
||||
return OpType(qcode.GetQType(query))
|
||||
}
|
||||
|
||||
// Name function returns the operation name from the query. It uses a very fast algorithm to
|
||||
// extract the operation name without having to parse the query.
|
||||
func Name(query string) string {
|
||||
return allow.QueryName(query)
|
||||
}
|
||||
|
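Assembled, the added lines of the core/api.go example above read roughly as follows. This is a sketch: the pgx driver import path is assumed, and the user id is passed as a string to match core/api_test.go:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib" // assumed driver import
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	// nil config uses defaults; pass the result of core.ReadInConfig to
	// customize roles, tables, etc.
	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	query := `query getProducts { products { id name } }`

	// The new package-level helpers classify a query without a full parse.
	if core.Operation(query) == core.OpQuery {
		fmt.Println("read-only query:", core.Name(query))
	}

	// The user id now travels on the request context rather than in config.
	ctx := context.WithValue(context.Background(), core.UserIDKey, "1")

	res, err := sg.GraphQL(ctx, query, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(res.Data))
}
```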
62 core/api_test.go Normal file
@ -0,0 +1,62 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
)
|
||||
|
||||
func BenchmarkGraphQL(b *testing.B) {
|
||||
ct := context.WithValue(context.Background(), UserIDKey, "1")
|
||||
|
||||
db, _, err := sqlmock.New()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// mock.ExpectQuery(`^SELECT jsonb_build_object`).WithArgs()
|
||||
c := &Config{}
|
||||
sg, err := newSuperGraph(c, db, psql.GetTestDBInfo())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
query := `
|
||||
query {
|
||||
products {
|
||||
id
|
||||
name
|
||||
user {
|
||||
full_name
|
||||
phone
|
||||
email
|
||||
}
|
||||
customers {
|
||||
id
|
||||
email
|
||||
}
|
||||
}
|
||||
users {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err = sg.GraphQL(ct, query, nil)
|
||||
}
|
||||
})
|
||||
|
||||
fmt.Println(err)
|
||||
|
||||
//fmt.Println(mock.ExpectationsWereMet())
|
||||
|
||||
}
|
128 core/args.go
@ -1,67 +1,18 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
func (c *scontext) argMap() func(w io.Writer, tag string) (int, error) {
|
||||
return func(w io.Writer, tag string) (int, error) {
|
||||
switch tag {
|
||||
case "user_id_provider":
|
||||
if v := c.Value(UserIDProviderKey); v != nil {
|
||||
return io.WriteString(w, v.(string))
|
||||
}
|
||||
return 0, argErr("user_id_provider")
|
||||
// argList function is used to create a list of arguments to pass
|
||||
// to a prepared statement.
|
||||
|
||||
case "user_id":
|
||||
if v := c.Value(UserIDKey); v != nil {
|
||||
return io.WriteString(w, v.(string))
|
||||
}
|
||||
return 0, argErr("user_id")
|
||||
|
||||
case "user_role":
|
||||
if v := c.Value(UserRoleKey); v != nil {
|
||||
return io.WriteString(w, v.(string))
|
||||
}
|
||||
return 0, argErr("user_role")
|
||||
}
|
||||
|
||||
fields := jsn.Get(c.vars, [][]byte{[]byte(tag)})
|
||||
|
||||
if len(fields) == 0 {
|
||||
return 0, argErr(tag)
|
||||
|
||||
}
|
||||
v := fields[0].Value
|
||||
|
||||
// Open and close quotes
|
||||
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
|
||||
fields[0].Value = v[1 : len(v)-1]
|
||||
}
|
||||
|
||||
if tag == "cursor" {
|
||||
if bytes.EqualFold(v, []byte("null")) {
|
||||
return io.WriteString(w, ``)
|
||||
}
|
||||
v1, err := c.sg.decrypt(string(fields[0].Value))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return w.Write(v1)
|
||||
}
|
||||
|
||||
return w.Write(escQuote(fields[0].Value))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
|
||||
vars := make([]interface{}, len(args))
|
||||
func (c *scontext) argList(md psql.Metadata) ([]interface{}, error) {
|
||||
vars := make([]interface{}, len(md.Params))
|
||||
|
||||
var fields map[string]json.RawMessage
|
||||
var err error
|
||||
@ -74,31 +25,30 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
|
||||
}
|
||||
}
|
||||
|
||||
for i := range args {
|
||||
av := args[i]
|
||||
switch {
|
||||
case bytes.Equal(av, []byte("user_id")):
|
||||
for i, p := range md.Params {
|
||||
switch p.Name {
|
||||
case "user_id":
|
||||
if v := c.Value(UserIDKey); v != nil {
|
||||
vars[i] = v.(string)
|
||||
} else {
|
||||
return nil, argErr("user_id")
|
||||
return nil, argErr(p)
|
||||
}
|
||||
|
||||
case bytes.Equal(av, []byte("user_id_provider")):
|
||||
case "user_id_provider":
|
||||
if v := c.Value(UserIDProviderKey); v != nil {
|
||||
vars[i] = v.(string)
|
||||
} else {
|
||||
return nil, argErr("user_id_provider")
|
||||
return nil, argErr(p)
|
||||
}
|
||||
|
||||
case bytes.Equal(av, []byte("user_role")):
|
||||
case "user_role":
|
||||
if v := c.Value(UserRoleKey); v != nil {
|
||||
vars[i] = v.(string)
|
||||
} else {
|
||||
return nil, argErr("user_role")
|
||||
return nil, argErr(p)
|
||||
}
|
||||
|
||||
case bytes.Equal(av, []byte("cursor")):
|
||||
case "cursor":
|
||||
if v, ok := fields["cursor"]; ok && v[0] == '"' {
|
||||
v1, err := c.sg.decrypt(string(v[1 : len(v)-1]))
|
||||
if err != nil {
|
||||
@ -106,25 +56,33 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
|
||||
}
|
||||
vars[i] = v1
|
||||
} else {
|
||||
return nil, argErr("cursor")
|
||||
return nil, argErr(p)
|
||||
}
|
||||
|
||||
default:
|
||||
if v, ok := fields[string(av)]; ok {
|
||||
if v, ok := fields[p.Name]; ok {
|
||||
switch {
|
||||
case p.IsArray && v[0] != '[':
|
||||
return nil, fmt.Errorf("variable '%s' should be an array of type '%s'", p.Name, p.Type)
|
||||
|
||||
case p.Type == "json" && v[0] != '[' && v[0] != '{':
|
||||
return nil, fmt.Errorf("variable '%s' should be an array or object", p.Name)
|
||||
}
|
||||
|
||||
switch v[0] {
|
||||
case '[', '{':
|
||||
vars[i] = escQuote(v)
|
||||
vars[i] = v
|
||||
|
||||
default:
|
||||
var val interface{}
|
||||
if err := json.Unmarshal(v, &val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vars[i] = val
|
||||
}
|
||||
|
||||
} else {
|
||||
return nil, argErr(string(av))
|
||||
return nil, argErr(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -132,34 +90,6 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
|
||||
return vars, nil
|
||||
}
|
||||
|
||||
func escQuote(b []byte) []byte {
|
||||
f := false
|
||||
for i := range b {
|
||||
if b[i] == '\'' {
|
||||
f = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !f {
|
||||
return b
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
s := 0
|
||||
for i := range b {
|
||||
if b[i] == '\'' {
|
||||
buf.Write(b[s:i])
|
||||
buf.WriteString(`''`)
|
||||
s = i + 1
|
||||
}
|
||||
}
|
||||
l := len(b)
|
||||
if s < (l - 1) {
|
||||
buf.Write(b[s:l])
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func argErr(name string) error {
|
||||
return fmt.Errorf("query requires variable '%s' to be set", name)
|
||||
func argErr(p psql.Param) error {
|
||||
return fmt.Errorf("required variable '%s' of type '%s' must be set", p.Name, p.Type)
|
||||
}
|
||||
|
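From the caller's side, the refactor above means named variables arrive as a single JSON blob while user_id, user_role and user_id_provider are read from the request context. A sketch under those assumptions; the mutation text comes from config/allow.list, and deleteProduct is a hypothetical helper name:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
)

// deleteProduct shows how variables meet the Metadata-driven argList: the
// $product_id value comes from the JSON vars, the user_id from the context.
func deleteProduct(sg *core.SuperGraph) {
	vars := json.RawMessage(`{"product_id": 1}`)
	ctx := context.WithValue(context.Background(), core.UserIDKey, "1")

	res, err := sg.GraphQL(ctx, `mutation { products(id: $product_id, delete: true) { id name } }`, vars)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(res.Data))
}

func main() {
	// Construct sg with core.NewSuperGraph as shown earlier, then call:
	// deleteProduct(sg)
}
```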
41 core/bench.11 Normal file
@ -0,0 +1,41 @@
|
||||
INF roles_query not defined: attribute based access control disabled
|
||||
all expectations were already fulfilled, call to Query 'SELECT jsonb_build_object('users', "__sj_0"."json", 'products', "__sj_1"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "__sj_2"."json" AS "customers", "__sj_3"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_1" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "users_3"."full_name" AS "full_name", "users_3"."phone" AS "phone", "users_3"."email" AS "email" FROM (SELECT "users"."full_name", "users"."phone", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_1"."user_id"))) LIMIT ('1') :: integer) AS "users_3") AS "__sr_3") AS "__sj_3" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_1"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1") AS "__sj_1", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."name" AS "name" FROM (SELECT "users"."id" FROM "users" GROUP BY "users"."id" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"' with args [] was not expected
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core
|
||||
BenchmarkGraphQL-16 INF roles_query not defined: attribute based access control disabled
|
||||
all expectations were already fulfilled, call to Query 'SELECT jsonb_build_object('users', "__sj_0"."json", 'products', "__sj_1"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "__sj_2"."json" AS "customers", "__sj_3"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_1" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "users_3"."full_name" AS "full_name", "users_3"."phone" AS "phone", "users_3"."email" AS "email" FROM (SELECT "users"."full_name", "users"."phone", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_1"."user_id"))) LIMIT ('1') :: integer) AS "users_3") AS "__sr_3") AS "__sj_3" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_1"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1") AS "__sj_1", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."name" AS "name" FROM (SELECT "users"."id" FROM "users" GROUP BY "users"."id" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"' with args [] was not expected
|
||||
INF roles_query not defined: attribute based access control disabled
|
||||
all expectations were already fulfilled, call to Query 'SELECT jsonb_build_object('users', "__sj_0"."json", 'products', "__sj_1"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "__sj_2"."json" AS "customers", "__sj_3"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_1" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "users_3"."full_name" AS "full_name", "users_3"."phone" AS "phone", "users_3"."email" AS "email" FROM (SELECT "users"."full_name", "users"."phone", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_1"."user_id"))) LIMIT ('1') :: integer) AS "users_3") AS "__sr_3") AS "__sj_3" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_1"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1") AS "__sj_1", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."name" AS "name" FROM (SELECT "users"."id" FROM "users" GROUP BY "users"."id" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"' with args [] was not expected
|
||||
INF roles_query not defined: attribute based access control disabled
|
||||
all expectations were already fulfilled, call to Query 'SELECT jsonb_build_object('users', "__sj_0"."json", 'products', "__sj_1"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "__sj_2"."json" AS "customers", "__sj_3"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_1" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "users_3"."full_name" AS "full_name", "users_3"."phone" AS "phone", "users_3"."email" AS "email" FROM (SELECT "users"."full_name", "users"."phone", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_1"."user_id"))) LIMIT ('1') :: integer) AS "users_3") AS "__sr_3") AS "__sj_3" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_1"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1") AS "__sj_1", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."name" AS "name" FROM (SELECT "users"."id" FROM "users" GROUP BY "users"."id" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"' with args [] was not expected
|
||||
105048 10398 ns/op 18342 B/op 55 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core 1.328s
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/allow 0.088s
|
||||
? github.com/dosco/super-graph/core/internal/crypto [no test files]
|
||||
? github.com/dosco/super-graph/core/internal/integration_tests [no test files]
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/integration_tests/cockroachdb 0.121s
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/integration_tests/postgresql 0.118s
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core/internal/psql
|
||||
BenchmarkCompile-16 79845 14428 ns/op 4584 B/op 39 allocs/op
|
||||
BenchmarkCompileParallel-16 326205 3918 ns/op 4633 B/op 39 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/psql 2.696s
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core/internal/qcode
|
||||
BenchmarkQCompile-16 146953 8049 ns/op 3756 B/op 28 allocs/op
|
||||
BenchmarkQCompileP-16 475936 2447 ns/op 3790 B/op 28 allocs/op
|
||||
BenchmarkParse-16 140811 8163 ns/op 3902 B/op 18 allocs/op
|
||||
BenchmarkParseP-16 571345 2041 ns/op 3903 B/op 18 allocs/op
|
||||
BenchmarkSchemaParse-16 230715 5012 ns/op 3968 B/op 57 allocs/op
|
||||
BenchmarkSchemaParseP-16 802426 1565 ns/op 3968 B/op 57 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/qcode 8.427s
|
||||
? github.com/dosco/super-graph/core/internal/util [no test files]
|
@ -14,7 +14,7 @@ import (
|
||||
type stmt struct {
|
||||
role *Role
|
||||
qc *qcode.QCode
|
||||
skipped uint32
|
||||
md psql.Metadata
|
||||
sql string
|
||||
}
|
||||
|
||||
@ -62,12 +62,11 @@ func (sg *SuperGraph) buildRoleStmt(query, vars []byte, role string) ([]stmt, er
|
||||
stmts := []stmt{stmt{role: ro, qc: qc}}
|
||||
w := &bytes.Buffer{}
|
||||
|
||||
skipped, err := sg.pc.Compile(qc, w, psql.Variables(vm))
|
||||
stmts[0].md, err = sg.pc.Compile(w, qc, psql.Variables(vm))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stmts[0].skipped = skipped
|
||||
stmts[0].sql = w.String()
|
||||
|
||||
return stmts, nil
|
||||
@ -104,14 +103,13 @@ func (sg *SuperGraph) buildMultiStmt(query, vars []byte) ([]stmt, error) {
|
||||
}
|
||||
|
||||
stmts = append(stmts, stmt{role: role, qc: qc})
|
||||
s := &stmts[len(stmts)-1]
|
||||
|
||||
skipped, err := sg.pc.Compile(qc, w, psql.Variables(vm))
|
||||
s.md, err = sg.pc.Compile(w, qc, psql.Variables(vm))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &stmts[len(stmts)-1]
|
||||
s.skipped = skipped
|
||||
s.sql = w.String()
|
||||
w.Reset()
|
||||
}
|
||||
@ -167,16 +165,16 @@ func (sg *SuperGraph) renderUserQuery(stmts []stmt) (string, error) {
|
||||
return w.String(), nil
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
|
||||
for _, id := range qc.Roots {
|
||||
t, err := sg.schema.GetTable(qc.Selects[id].Name)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
|
||||
// for _, id := range qc.Roots {
|
||||
// t, err := sg.schema.GetTable(qc.Selects[id].Name)
|
||||
// if err != nil {
|
||||
// return false
|
||||
// }
|
||||
|
||||
if r := role.GetTable(t.Name); r == nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
// if r := role.GetTable(t.Name); r == nil {
|
||||
// return false
|
||||
// }
|
||||
// }
|
||||
// return true
|
||||
// }
|
||||
|
110 core/config.go
@ -3,6 +3,7 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
@ -10,16 +11,61 @@ import (
|
||||
|
||||
// Core struct contains core specific config value
|
||||
type Config struct {
|
||||
// SecretKey is used to encrypt opaque values such as
|
||||
// the cursor. Auto-generated if not set
|
||||
SecretKey string `mapstructure:"secret_key"`
|
||||
|
||||
// UseAllowList (aka production mode) when set to true ensures
|
||||
// only queries listed in the allow.list file can be used. All
|
||||
// queries are pre-prepared so no compiling happens and things are
|
||||
// very fast.
|
||||
UseAllowList bool `mapstructure:"use_allow_list"`
|
||||
|
||||
// AllowListFile is the path to the allow list file. If not set the
|
||||
// path is assumed to be the same as the config path (allow.list)
|
||||
AllowListFile string `mapstructure:"allow_list_file"`
|
||||
|
||||
// SetUserID forces the database session variable `user.id` to
|
||||
// be set to the user id. This variable can be used by triggers
|
||||
// or other database functions
|
||||
SetUserID bool `mapstructure:"set_user_id"`
|
||||
|
||||
// DefaultBlock ensures that in anonymous mode (role 'anon') all tables
|
||||
// are blocked from queries and mutations. To open access to tables in
|
||||
// anonymous mode they have to be added to the 'anon' role config.
|
||||
DefaultBlock bool `mapstructure:"default_block"`
|
||||
|
||||
// Vars is a map of hardcoded variables that can be leveraged in your
|
||||
// queries (eg variable admin_id will be $admin_id in the query)
|
||||
Vars map[string]string `mapstructure:"variables"`
|
||||
|
||||
// Blocklist is a list of tables and columns that should be filtered
|
||||
// out from any and all queries
|
||||
Blocklist []string
|
||||
|
||||
// Tables contains all table specific configuration such as aliased tables
|
||||
// creating relationships between tables, etc
|
||||
Tables []Table
|
||||
|
||||
// RolesQuery if set enables attribute based access control. This query
|
||||
// is used to fetch the user attributes that then dynamically define the user's
|
||||
// role.
|
||||
RolesQuery string `mapstructure:"roles_query"`
|
||||
|
||||
// Roles contains all the configuration for all the roles you want to support
|
||||
// `user` and `anon` are two default roles. User role is for when a user ID is
|
||||
// available and Anon when it's not.
|
||||
//
|
||||
// If you're using the RolesQuery config to enable attribute based access control then
|
||||
// you can add more custom roles.
|
||||
Roles []Role
|
||||
Inflections map[string]string
|
||||
|
||||
// Inflections adds additional singular to plural mappings
|
||||
// to the engine (eg. sheep: sheep)
|
||||
Inflections map[string]string `mapstructure:"inflections"`
|
||||
|
||||
// Database schema name. Defaults to 'public'
|
||||
DBSchema string `mapstructure:"db_schema"`
|
||||
}
|
||||
|
||||
// Table struct defines a database table
|
||||
@ -63,11 +109,12 @@ type Role struct {
|
||||
// RoleTable struct contains role specific access control values for a database table
|
||||
type RoleTable struct {
|
||||
Name string
|
||||
ReadOnly bool `mapstructure:"read_only"`
|
||||
|
||||
Query Query
|
||||
Insert Insert
|
||||
Update Update
|
||||
Delete Delete
|
||||
Query *Query
|
||||
Insert *Insert
|
||||
Update *Update
|
||||
Delete *Delete
|
||||
}
|
||||
|
||||
// Query struct contains access control values for query operations
|
||||
@ -102,6 +149,51 @@ type Delete struct {
|
||||
Block bool
|
||||
}
|
||||
|
||||
// AddRoleTable function is a helper function to make it easy to add per-table
|
||||
// row-level config
|
||||
func (c *Config) AddRoleTable(role string, table string, conf interface{}) error {
|
||||
var r *Role
|
||||
|
||||
for i := range c.Roles {
|
||||
if strings.EqualFold(c.Roles[i].Name, role) {
|
||||
r = &c.Roles[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if r == nil {
|
||||
nr := Role{Name: role}
|
||||
c.Roles = append(c.Roles, nr)
|
||||
r = &nr
|
||||
}
|
||||
|
||||
var t *RoleTable
|
||||
for i := range r.Tables {
|
||||
if strings.EqualFold(r.Tables[i].Name, table) {
|
||||
t = &r.Tables[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if t == nil {
|
||||
nt := RoleTable{Name: table}
|
||||
r.Tables = append(r.Tables, nt)
|
||||
t = &nt
|
||||
}
|
||||
|
||||
switch v := conf.(type) {
|
||||
case Query:
|
||||
t.Query = &v
|
||||
case Insert:
|
||||
t.Insert = &v
|
||||
case Update:
|
||||
t.Update = &v
|
||||
case Delete:
|
||||
t.Delete = &v
|
||||
default:
|
||||
return fmt.Errorf("unsupported object type: %t", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadInConfig function reads in the config file for the environment specified in the GO_ENV
|
||||
// environment variable. This is the best way to create a new Super Graph config.
|
||||
func ReadInConfig(configFile string) (*Config, error) {
|
||||
@ -115,7 +207,7 @@ func ReadInConfig(configFile string) (*Config, error) {
|
||||
|
||||
inherits := vi.GetString("inherits")
|
||||
|
||||
if len(inherits) != 0 {
|
||||
if inherits != "" {
|
||||
vi = newViper(cpath, inherits)
|
||||
|
||||
if err := vi.ReadInConfig(); err != nil {
|
||||
@ -141,7 +233,7 @@ func ReadInConfig(configFile string) (*Config, error) {
|
||||
return nil, fmt.Errorf("failed to decode config, %v", err)
|
||||
}
|
||||
|
||||
if len(c.AllowListFile) == 0 {
|
||||
if c.AllowListFile == "" {
|
||||
c.AllowListFile = path.Join(cpath, "allow.list")
|
||||
}
|
||||
|
||||
@ -155,9 +247,13 @@ func newViper(configPath, configFile string) *viper.Viper {
|
||||
vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
vi.AutomaticEnv()
|
||||
|
||||
if filepath.Ext(configFile) != "" {
|
||||
vi.SetConfigFile(configFile)
|
||||
} else {
|
||||
vi.SetConfigName(configFile)
|
||||
vi.AddConfigPath(configPath)
|
||||
vi.AddConfigPath("./config")
|
||||
}
|
||||
|
||||
return vi
|
||||
}
|
||||
|
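The new AddRoleTable helper lets the per-role table rules from config/dev.yml be attached in code. A minimal sketch; the role and table names are illustrative:

```go
package main

import (
	"log"

	"github.com/dosco/super-graph/core"
)

func main() {
	conf := &core.Config{}

	// AddRoleTable creates the role and table entries if they don't exist yet
	// and attaches the given rule; here deletes are blocked for the "anon" role.
	if err := conf.AddRoleTable("anon", "products", core.Delete{Block: true}); err != nil {
		log.Fatal(err)
	}

	// conf can now be passed to core.NewSuperGraph(conf, db) as usual.
}
```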
@ -5,11 +5,6 @@ import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
openVar = "{{"
|
||||
closeVar = "}}"
|
||||
)
|
||||
|
||||
var (
|
||||
errNotFound = errors.New("not found in prepared statements")
|
||||
)
|
||||
|
125 core/core.go
@ -1,17 +1,22 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash/maphash"
|
||||
"time"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
)
|
||||
|
||||
"github.com/valyala/fasttemplate"
|
||||
type OpType int
|
||||
|
||||
const (
|
||||
OpQuery OpType = iota
|
||||
OpMutation
|
||||
)
|
||||
|
||||
type extensions struct {
|
||||
@ -50,25 +55,43 @@ type scontext struct {
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) initCompilers() error {
|
||||
di, err := psql.GetDBInfo(sg.db)
|
||||
var err error
|
||||
var schema string
|
||||
|
||||
if sg.conf.DBSchema == "" {
|
||||
schema = "public"
|
||||
} else {
|
||||
schema = sg.conf.DBSchema
|
||||
}
|
||||
|
||||
// If sg.dbinfo is not nil then it's probably set
|
||||
// for tests
|
||||
if sg.dbinfo == nil {
|
||||
sg.dbinfo, err = psql.GetDBInfo(sg.db, schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = addTables(sg.conf, di); err != nil {
|
||||
if len(sg.dbinfo.Tables) == 0 {
|
||||
return fmt.Errorf("no tables found in database (schema: %s)", schema)
|
||||
}
|
||||
|
||||
if err = addTables(sg.conf, sg.dbinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = addForeignKeys(sg.conf, di); err != nil {
|
||||
if err = addForeignKeys(sg.conf, sg.dbinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sg.schema, err = psql.NewDBSchema(di, getDBTableAliases(sg.conf))
|
||||
sg.schema, err = psql.NewDBSchema(sg.dbinfo, getDBTableAliases(sg.conf))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sg.qc, err = qcode.NewCompiler(qcode.Config{
|
||||
DefaultBlock: sg.conf.DefaultBlock,
|
||||
Blocklist: sg.conf.Blocklist,
|
||||
})
|
||||
if err != nil {
|
||||
@ -89,25 +112,25 @@ func (sg *SuperGraph) initCompilers() error {
|
||||
|
||||
func (c *scontext) execQuery() ([]byte, error) {
|
||||
var data []byte
|
||||
// var st *stmt
|
||||
var st *stmt
|
||||
var err error
|
||||
|
||||
if c.sg.conf.UseAllowList {
|
||||
data, _, err = c.resolvePreparedSQL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data, st, err = c.resolvePreparedSQL()
|
||||
} else {
|
||||
data, _, err = c.resolveSQL()
|
||||
data, st, err = c.resolveSQL()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(data) == 0 || st.md.Skipped == 0 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
return data, nil
|
||||
|
||||
//return execRemoteJoin(st, data, c.req.hdr)
|
||||
// return c.sg.execRemoteJoin(st, data, c.req.hdr)
|
||||
return c.sg.execRemoteJoin(st, data, nil)
|
||||
}
|
||||
|
||||
func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
|
||||
@ -143,32 +166,43 @@ func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
|
||||
|
||||
} else {
|
||||
role = c.role
|
||||
|
||||
}
|
||||
|
||||
c.res.role = role
|
||||
|
||||
ps, ok := c.sg.prepared[stmtHash(c.res.name, role)]
|
||||
h := maphash.Hash{}
|
||||
h.SetSeed(c.sg.hashSeed)
|
||||
|
||||
q, ok := c.sg.queries[queryID(&h, c.res.name, role)]
|
||||
if !ok {
|
||||
return nil, nil, errNotFound
|
||||
}
|
||||
c.res.sql = ps.st.sql
|
||||
|
||||
if q.sd == nil {
|
||||
q.Do(func() { c.sg.prepare(q, role) })
|
||||
|
||||
if q.err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
c.res.sql = q.st.sql
|
||||
|
||||
var root []byte
|
||||
var row *sql.Row
|
||||
|
||||
varsList, err := c.argList(ps.args)
|
||||
varsList, err := c.argList(q.st.md)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if useTx {
|
||||
row = tx.Stmt(ps.sd).QueryRow(varsList...)
|
||||
row = tx.Stmt(q.sd).QueryRow(varsList...)
|
||||
} else {
|
||||
row = ps.sd.QueryRow(varsList...)
|
||||
row = q.sd.QueryRow(varsList...)
|
||||
}
|
||||
|
||||
if ps.roleArg {
|
||||
if q.roleArg {
|
||||
err = row.Scan(&role, &root)
|
||||
} else {
|
||||
err = row.Scan(&root)
|
||||
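The hunk above replaces the old string-keyed prepared-statement map with a lookup keyed by a hash/maphash digest of the operation name and role, with the statement prepared lazily via q.Do. A minimal sketch of such a key function, assuming the seed is created once at startup; the ":" separator is an assumption for illustration, not taken from the source.

package main

import (
	"fmt"
	"hash/maphash"
)

// queryID folds an operation name and role into one lookup key.
func queryID(h *maphash.Hash, name, role string) uint64 {
	h.Reset()
	h.WriteString(name)
	h.WriteString(":")
	h.WriteString(role)
	return h.Sum64()
}

func main() {
	seed := maphash.MakeSeed() // create once; keys are only stable within a single process

	var h maphash.Hash
	h.SetSeed(seed)
	fmt.Println(queryID(&h, "getProducts", "user"))
}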
@ -182,15 +216,15 @@ func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
|
||||
|
||||
if useTx {
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, q.err
|
||||
}
|
||||
}
|
||||
|
||||
if root, err = c.sg.encryptCursor(ps.st.qc, root); err != nil {
|
||||
if root, err = c.sg.encryptCursor(q.st.qc, root); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return root, &ps.st, nil
|
||||
return root, &q.st, nil
|
||||
}
|
||||
|
||||
func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
|
||||
@ -228,15 +262,23 @@ func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
st := &stmts[0]
|
||||
c.res.sql = st.sql
|
||||
|
||||
t := fasttemplate.New(st.sql, openVar, closeVar)
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
_, err = t.ExecuteFunc(buf, c.argMap())
|
||||
varList, err := c.argList(st.md)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
finalSQL := buf.String()
|
||||
// finalSQL := buf.String()
|
||||
|
||||
////
|
||||
|
||||
// _, err = t.ExecuteFunc(buf, c.argMap(st.md))
|
||||
// if err != nil {
|
||||
// return nil, nil, err
|
||||
// }
|
||||
// finalSQL := buf.String()
|
||||
|
||||
/////
|
||||
|
||||
// var stime time.Time
|
||||
|
||||
@ -251,9 +293,9 @@ func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
|
||||
// defaultRole := c.role
|
||||
|
||||
if useTx {
|
||||
row = tx.QueryRow(finalSQL)
|
||||
row = tx.QueryRowContext(c, st.sql, varList...)
|
||||
} else {
|
||||
row = c.sg.db.QueryRow(finalSQL)
|
||||
row = c.sg.db.QueryRowContext(c, st.sql, varList...)
|
||||
}
|
||||
|
||||
if len(stmts) > 1 {
|
||||
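Note that resolveSQL no longer interpolates variables into the SQL text with fasttemplate; the compiled statement keeps $1, $2, ... placeholders and the values are handed to QueryRowContext. A hedged, generic illustration of that pattern with database/sql (the connection string and query are placeholders, not taken from the source):

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// The variable travels as $1 instead of being spliced into the SQL string,
	// so the statement shape stays fixed and the driver handles quoting.
	var root []byte
	err = db.QueryRowContext(context.Background(),
		`SELECT jsonb_build_object('user', full_name) FROM users WHERE id = $1`, 42,
	).Scan(&root)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(root))
}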
@ -262,8 +304,6 @@ func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
|
||||
err = row.Scan(&root)
|
||||
}
|
||||
|
||||
c.res.sql = finalSQL
|
||||
|
||||
if len(role) == 0 {
|
||||
c.res.role = c.role
|
||||
} else {
|
||||
@ -322,7 +362,20 @@ func (c *scontext) executeRoleQuery(tx *sql.Tx) (string, error) {
|
||||
return role, nil
|
||||
}
|
||||
|
||||
func (r *Result) Operation() string {
|
||||
func (r *Result) Operation() OpType {
|
||||
switch r.op {
|
||||
case qcode.QTQuery:
|
||||
return OpQuery
|
||||
|
||||
case qcode.QTMutation, qcode.QTInsert, qcode.QTUpdate, qcode.QTUpsert, qcode.QTDelete:
|
||||
return OpMutation
|
||||
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Result) OperationName() string {
|
||||
return r.op.String()
|
||||
}
|
||||
|
||||
|
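With Operation() now returning an OpType instead of a string, callers can branch on the typed value. A small usage sketch; the helper below is hypothetical.

package example

import "github.com/dosco/super-graph/core"

// describe shows how a caller might branch on the typed operation.
func describe(res *core.Result) string {
	switch res.Operation() {
	case core.OpQuery:
		return "read: " + res.OperationName()
	case core.OpMutation:
		return "write: " + res.OperationName()
	default:
		return "unknown operation"
	}
}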
@ -5,8 +5,8 @@ import (
|
||||
"encoding/base64"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/crypto"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
func (sg *SuperGraph) encryptCursor(qc *qcode.QCode, data []byte) ([]byte, error) {
|
||||
|
104
core/init.go
@ -2,9 +2,7 @@ package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
@ -18,11 +16,6 @@ func (sg *SuperGraph) initConfig() error {
|
||||
flect.AddPlural(k, v)
|
||||
}
|
||||
|
||||
// Variables: Validate and sanitize
|
||||
for k, v := range c.Vars {
|
||||
c.Vars[k] = sanitizeVars(v)
|
||||
}
|
||||
|
||||
// Tables: Validate and sanitize
|
||||
tm := make(map[string]struct{})
|
||||
|
||||
@ -70,17 +63,24 @@ func (sg *SuperGraph) initConfig() error {
|
||||
sg.roles["user"] = &ur
|
||||
}
|
||||
|
||||
// Roles: validate and sanitize
|
||||
c.RolesQuery = sanitize(c.RolesQuery)
|
||||
// If anon role is not defined then create it
|
||||
if _, ok := sg.roles["anon"]; !ok {
|
||||
ur := Role{
|
||||
Name: "anon",
|
||||
tm: make(map[string]*RoleTable),
|
||||
}
|
||||
c.Roles = append(c.Roles, ur)
|
||||
sg.roles["anon"] = &ur
|
||||
}
|
||||
|
||||
if len(c.RolesQuery) == 0 {
|
||||
sg.log.Printf("WRN roles_query not defined: attribute based access control disabled")
|
||||
if c.RolesQuery == "" {
|
||||
sg.log.Printf("INF roles_query not defined: attribute based access control disabled")
|
||||
}
|
||||
|
||||
_, userExists := sg.roles["user"]
|
||||
_, sg.anonExists = sg.roles["anon"]
|
||||
|
||||
sg.abacEnabled = userExists && len(c.RolesQuery) != 0
|
||||
sg.abacEnabled = userExists && c.RolesQuery != ""
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -102,12 +102,13 @@ func getDBTableAliases(c *Config) map[string][]string {
|
||||
|
||||
func addTables(c *Config, di *psql.DBInfo) error {
|
||||
for _, t := range c.Tables {
|
||||
if len(t.Table) == 0 || len(t.Columns) == 0 {
|
||||
if t.Table == "" || len(t.Columns) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := addTable(di, t.Columns, t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -152,7 +153,7 @@ func addTable(di *psql.DBInfo, cols []Column, t Table) error {
|
||||
func addForeignKeys(c *Config, di *psql.DBInfo) error {
|
||||
for _, t := range c.Tables {
|
||||
for _, c := range t.Columns {
|
||||
if len(c.ForeignKey) == 0 {
|
||||
if c.ForeignKey == "" {
|
||||
continue
|
||||
}
|
||||
if err := addForeignKey(di, c, t); err != nil {
|
||||
@ -195,7 +196,7 @@ func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
|
||||
func addRoles(c *Config, qc *qcode.Compiler) error {
|
||||
for _, r := range c.Roles {
|
||||
for _, t := range r.Tables {
|
||||
if err := addRole(qc, r, t); err != nil {
|
||||
if err := addRole(qc, r, t, c.DefaultBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -204,54 +205,63 @@ func addRoles(c *Config, qc *qcode.Compiler) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func addRole(qc *qcode.Compiler, r Role, t RoleTable) error {
|
||||
blockFilter := []string{"false"}
|
||||
func addRole(qc *qcode.Compiler, r Role, t RoleTable, defaultBlock bool) error {
|
||||
ro := false // read-only
|
||||
|
||||
query := qcode.QueryConfig{
|
||||
if defaultBlock && r.Name == "anon" {
|
||||
ro = true
|
||||
}
|
||||
|
||||
if t.ReadOnly {
|
||||
ro = true
|
||||
}
|
||||
|
||||
query := qcode.QueryConfig{Block: false}
|
||||
insert := qcode.InsertConfig{Block: ro}
|
||||
update := qcode.UpdateConfig{Block: ro}
|
||||
del := qcode.DeleteConfig{Block: ro}
|
||||
|
||||
if t.Query != nil {
|
||||
query = qcode.QueryConfig{
|
||||
Limit: t.Query.Limit,
|
||||
Filters: t.Query.Filters,
|
||||
Columns: t.Query.Columns,
|
||||
DisableFunctions: t.Query.DisableFunctions,
|
||||
Block: t.Query.Block,
|
||||
}
|
||||
}
|
||||
|
||||
if t.Query.Block {
|
||||
query.Filters = blockFilter
|
||||
}
|
||||
|
||||
insert := qcode.InsertConfig{
|
||||
if t.Insert != nil {
|
||||
insert = qcode.InsertConfig{
|
||||
Filters: t.Insert.Filters,
|
||||
Columns: t.Insert.Columns,
|
||||
Presets: t.Insert.Presets,
|
||||
Block: t.Insert.Block,
|
||||
}
|
||||
}
|
||||
|
||||
if t.Insert.Block {
|
||||
insert.Filters = blockFilter
|
||||
}
|
||||
|
||||
update := qcode.UpdateConfig{
|
||||
if t.Update != nil {
|
||||
update = qcode.UpdateConfig{
|
||||
Filters: t.Update.Filters,
|
||||
Columns: t.Update.Columns,
|
||||
Presets: t.Update.Presets,
|
||||
Block: t.Update.Block,
|
||||
}
|
||||
}
|
||||
|
||||
if t.Update.Block {
|
||||
update.Filters = blockFilter
|
||||
}
|
||||
|
||||
delete := qcode.DeleteConfig{
|
||||
if t.Delete != nil {
|
||||
del = qcode.DeleteConfig{
|
||||
Filters: t.Delete.Filters,
|
||||
Columns: t.Delete.Columns,
|
||||
Block: t.Delete.Block,
|
||||
}
|
||||
|
||||
if t.Delete.Block {
|
||||
delete.Filters = blockFilter
|
||||
}
|
||||
|
||||
return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
|
||||
Query: query,
|
||||
Insert: insert,
|
||||
Update: update,
|
||||
Delete: delete,
|
||||
Delete: del,
|
||||
})
|
||||
}
|
||||
|
||||
@ -262,23 +272,3 @@ func (r *Role) GetTable(name string) *RoleTable {
|
||||
func sanitize(value string) string {
|
||||
return strings.ToLower(strings.TrimSpace(value))
|
||||
}
|
||||
|
||||
var (
|
||||
varRe1 = regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
|
||||
varRe2 = regexp.MustCompile(`\{\{([a-zA-Z0-9_.]+)\}\}`)
|
||||
)
|
||||
|
||||
func sanitizeVars(s string) string {
|
||||
s0 := varRe1.ReplaceAllString(s, `{{$1}}`)
|
||||
|
||||
s1 := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return ' '
|
||||
}
|
||||
return r
|
||||
}, s0)
|
||||
|
||||
return varRe2.ReplaceAllStringFunc(s1, func(m string) string {
|
||||
return strings.ToLower(m)
|
||||
})
|
||||
}
|
||||
|
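For reference, the sanitizeVars helper added above rewrites $var placeholders into the {{var}} form, collapses whitespace runs to single spaces, and lowercases the variable names. The snippet below reproduces it to show the effect on a sample roles query; the input string is an assumption.

package main

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
)

var (
	varRe1 = regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
	varRe2 = regexp.MustCompile(`\{\{([a-zA-Z0-9_.]+)\}\}`)
)

func sanitizeVars(s string) string {
	// $USER_ID -> {{USER_ID}}
	s0 := varRe1.ReplaceAllString(s, `{{$1}}`)

	// Newlines and tabs become plain spaces.
	s1 := strings.Map(func(r rune) rune {
		if unicode.IsSpace(r) {
			return ' '
		}
		return r
	}, s0)

	// {{USER_ID}} -> {{user_id}}
	return varRe2.ReplaceAllStringFunc(s1, strings.ToLower)
}

func main() {
	in := "SELECT * FROM users\n WHERE id = $USER_ID"
	fmt.Println(sanitizeVars(in))
	// SELECT * FROM users  WHERE id = {{user_id}}
}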
@ -6,9 +6,13 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/chirino/graphql/schema"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -32,12 +36,13 @@ type List struct {
|
||||
type Config struct {
|
||||
CreateIfNotExists bool
|
||||
Persist bool
|
||||
Log *log.Logger
|
||||
}
|
||||
|
||||
func New(filename string, conf Config) (*List, error) {
|
||||
al := List{}
|
||||
|
||||
if len(filename) != 0 {
|
||||
if filename != "" {
|
||||
fp := filename
|
||||
|
||||
if _, err := os.Stat(fp); err == nil {
|
||||
@ -47,7 +52,7 @@ func New(filename string, conf Config) (*List, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(al.filepath) == 0 {
|
||||
if al.filepath == "" {
|
||||
fp := "./allow.list"
|
||||
|
||||
if _, err := os.Stat(fp); err == nil {
|
||||
@ -57,7 +62,7 @@ func New(filename string, conf Config) (*List, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(al.filepath) == 0 {
|
||||
if al.filepath == "" {
|
||||
fp := "./config/allow.list"
|
||||
|
||||
if _, err := os.Stat(fp); err == nil {
|
||||
@ -67,16 +72,22 @@ func New(filename string, conf Config) (*List, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(al.filepath) == 0 {
|
||||
if al.filepath == "" {
|
||||
if !conf.CreateIfNotExists {
|
||||
return nil, errors.New("allow.list not found")
|
||||
}
|
||||
|
||||
if len(filename) == 0 {
|
||||
if filename == "" {
|
||||
al.filepath = "./config/allow.list"
|
||||
} else {
|
||||
al.filepath = filename
|
||||
}
|
||||
|
||||
if file, err := os.OpenFile(al.filepath, os.O_RDONLY|os.O_CREATE, 0644); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
file.Close()
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
@ -86,8 +97,10 @@ func New(filename string, conf Config) (*List, error) {
|
||||
|
||||
go func() {
|
||||
for v := range al.saveChan {
|
||||
if err = al.save(v); err != nil {
|
||||
break
|
||||
err := al.save(v)
|
||||
|
||||
if err != nil && conf.Log != nil {
|
||||
conf.Log.Println("WRN allow list save:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@ -109,7 +122,7 @@ func (al *List) Set(vars []byte, query, comment string) error {
|
||||
return errors.New("allow.list is read-only")
|
||||
}
|
||||
|
||||
if len(query) == 0 {
|
||||
if query == "" {
|
||||
return errors.New("empty query")
|
||||
}
|
||||
|
||||
@ -138,6 +151,7 @@ func (al *List) Set(vars []byte, query, comment string) error {
|
||||
|
||||
func (al *List) Load() ([]Item, error) {
|
||||
var list []Item
|
||||
varString := "variables"
|
||||
|
||||
b, err := ioutil.ReadFile(al.filepath)
|
||||
if err != nil {
|
||||
@ -178,9 +192,9 @@ func (al *List) Load() ([]Item, error) {
|
||||
s = e
|
||||
}
|
||||
ty = AL_QUERY
|
||||
} else if matchPrefix(b, e, "variables") {
|
||||
} else if matchPrefix(b, e, varString) {
|
||||
if c == 0 {
|
||||
s = e + len("variables") + 1
|
||||
s = e + len(varString) + 1
|
||||
}
|
||||
ty = AL_VARS
|
||||
} else if b[e] == '{' {
|
||||
@ -230,10 +244,22 @@ func (al *List) Load() ([]Item, error) {
|
||||
}
|
||||
|
||||
func (al *List) save(item Item) error {
|
||||
item.Name = QueryName(item.Query)
|
||||
var buf bytes.Buffer
|
||||
|
||||
qd := &schema.QueryDocument{}
|
||||
|
||||
if err := qd.Parse(item.Query); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
qd.WriteTo(&buf)
|
||||
query := buf.String()
|
||||
buf.Reset()
|
||||
|
||||
item.Name = QueryName(query)
|
||||
item.key = strings.ToLower(item.Name)
|
||||
|
||||
if len(item.Name) == 0 {
|
||||
if item.Name == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -252,7 +278,7 @@ func (al *List) save(item Item) error {
|
||||
}
|
||||
|
||||
if index != -1 {
|
||||
if len(list[index].Comment) != 0 {
|
||||
if list[index].Comment != "" {
|
||||
item.Comment = list[index].Comment
|
||||
}
|
||||
list[index] = item
|
||||
@ -276,7 +302,7 @@ func (al *List) save(item Item) error {
|
||||
|
||||
i := 0
|
||||
for _, c := range cmtLines {
|
||||
if c = strings.TrimSpace(c); len(c) == 0 {
|
||||
if c = strings.TrimSpace(c); c == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -298,9 +324,16 @@ func (al *List) save(item Item) error {
|
||||
}
|
||||
|
||||
if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
|
||||
vj, err := json.MarshalIndent(v.Vars, "", " ")
|
||||
buf.Reset()
|
||||
|
||||
if err := jsn.Clear(&buf, v.Vars); err != nil {
|
||||
return fmt.Errorf("failed to clean vars: %w", err)
|
||||
}
|
||||
vj := json.RawMessage(buf.Bytes())
|
||||
|
||||
vj, err = json.MarshalIndent(vj, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal vars: %v", err)
|
||||
return fmt.Errorf("failed to marshal vars: %w", err)
|
||||
}
|
||||
|
||||
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
|
||||
|
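When an item is saved, its variables are now cleaned with jsn.Clear and re-indented before being written after the variables keyword. A stdlib-only sketch of the re-indentation step; the sample payload is an assumption, and the real code also strips values via jsn.Clear first.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Raw variables as they might arrive with a query.
	vars := json.RawMessage(`{"id":5001,"update":{"quantity":20}}`)

	// MarshalIndent on a json.RawMessage re-emits the same JSON, indented,
	// which is the shape written after the "variables" keyword in allow.list.
	out, err := json.MarshalIndent(vars, "", " ")
	if err != nil {
		log.Fatalf("failed to marshal vars: %v", err)
	}
	fmt.Printf("variables %s\n\n", out)
}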
@ -0,0 +1,88 @@
|
||||
package cockraochdb_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
integration_tests "github.com/dosco/super-graph/core/internal/integration_tests"
|
||||
_ "github.com/jackc/pgx/v4/stdlib"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCockroachDB(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "temp-cockraochdb-")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := exec.Command("cockroach", "start", "--insecure", "--listen-addr", ":0", "--http-addr", ":0", "--store=path="+dir)
|
||||
finder := &urlFinder{
|
||||
c: make(chan bool),
|
||||
}
|
||||
cmd.Stdout = finder
|
||||
cmd.Stderr = ioutil.Discard
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
t.Skip("is CockroachDB installed?: " + err.Error())
|
||||
return
|
||||
}
|
||||
fmt.Println("started temporary cockroach db")
|
||||
|
||||
stopped := int32(0)
|
||||
stopDatabase := func() {
|
||||
fmt.Println("stopping temporary cockroach db")
|
||||
if atomic.CompareAndSwapInt32(&stopped, 0, 1) {
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err := cmd.Process.Wait(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}
|
||||
defer stopDatabase()
|
||||
|
||||
// Wait till we figure out the URL we should connect to...
|
||||
<-finder.c
|
||||
db, err := sql.Open("pgx", finder.URL)
|
||||
if err != nil {
|
||||
stopDatabase()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
integration_tests.SetupSchema(t, db)
|
||||
|
||||
integration_tests.TestSuperGraph(t, db, func(t *testing.T) {
|
||||
if t.Name() == "TestCockroachDB/nested_insert" {
|
||||
t.Skip("nested inserts currently not working yet on cockroach db")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type urlFinder struct {
|
||||
c chan bool
|
||||
done bool
|
||||
URL string
|
||||
}
|
||||
|
||||
func (finder *urlFinder) Write(p []byte) (n int, err error) {
|
||||
s := string(p)
|
||||
urlRegex := regexp.MustCompile(`\nsql:\s+(postgresql:[^\s]+)\n`)
|
||||
if !finder.done {
|
||||
submatch := urlRegex.FindAllStringSubmatch(s, -1)
|
||||
if submatch != nil {
|
||||
finder.URL = submatch[0][1]
|
||||
finder.done = true
|
||||
close(finder.c)
|
||||
}
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
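The urlFinder above scans the child process's stdout for the connection URL that cockroach start prints. A quick check of that regex against a plausible output line; the sample text is an assumption, not captured from a real run.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	out := "node starting\nsql:        postgresql://root@127.0.0.1:26257?sslmode=disable\nwebui:      http://127.0.0.1:8080\n"

	// Same expression as in urlFinder.Write above.
	re := regexp.MustCompile(`\nsql:\s+(postgresql:[^\s]+)\n`)
	if m := re.FindStringSubmatch(out); m != nil {
		fmt.Println(m[1]) // postgresql://root@127.0.0.1:26257?sslmode=disable
	}
}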
249
core/internal/integration_tests/integration_tests.go
Normal file
@ -0,0 +1,249 @@
|
||||
package integration_tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/dosco/super-graph/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func SetupSchema(t *testing.T, db *sql.DB) {
|
||||
|
||||
_, err := db.Exec(`
|
||||
CREATE TABLE users (
|
||||
id integer PRIMARY KEY,
|
||||
full_name text
|
||||
)`)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE product (
|
||||
id integer PRIMARY KEY,
|
||||
name text,
|
||||
weight float
|
||||
)`)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE line_item (
|
||||
id integer PRIMARY KEY,
|
||||
product integer REFERENCES product(id),
|
||||
quantity integer,
|
||||
price float
|
||||
)`)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func DropSchema(t *testing.T, db *sql.DB) {
|
||||
|
||||
_, err := db.Exec(`DROP TABLE IF EXISTS line_item`)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.Exec(`DROP TABLE IF EXISTS product`)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.Exec(`DROP TABLE IF EXISTS users`)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestSuperGraph(t *testing.T, db *sql.DB, before func(t *testing.T)) {
|
||||
config := core.Config{}
|
||||
config.UseAllowList = false
|
||||
config.AllowListFile = "./allow.list"
|
||||
config.RolesQuery = `SELECT * FROM users WHERE id = $user_id`
|
||||
|
||||
sg, err := core.NewSuperGraph(&config, db)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("seed fixtures", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`mutation { products (insert: $products) { id } }`,
|
||||
json.RawMessage(`{"products":[
|
||||
{"id":1, "name":"Charmin Ultra Soft", "weight": 0.5},
|
||||
{"id":2, "name":"Hand Sanitizer", "weight": 0.2},
|
||||
{"id":3, "name":"Case of Corona", "weight": 1.2}
|
||||
]}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"products": [{"id": 1}, {"id": 2}, {"id": 3}]}`, string(res.Data))
|
||||
|
||||
res, err = sg.GraphQL(ctx,
|
||||
`mutation { line_items (insert: $line_items) { id } }`,
|
||||
json.RawMessage(`{"line_items":[
|
||||
{"id":5001, "product":1, "price":6.95, "quantity":10},
|
||||
{"id":5002, "product":2, "price":10.99, "quantity":2}
|
||||
]}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_items": [{"id": 5001}, {"id": 5002}]}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("get line item", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`query { line_item(id:$id) { id, price, quantity } }`,
|
||||
json.RawMessage(`{"id":5001}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_item": {"id": 5001, "price": 6.95, "quantity": 10}}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("get line items", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`query { line_items { id, price, quantity } }`,
|
||||
json.RawMessage(`{}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_items": [{"id": 5001, "price": 6.95, "quantity": 10}, {"id": 5002, "price": 10.99, "quantity": 2}]}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("update line item", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`mutation { line_item(update:$update, id:$id) { id } }`,
|
||||
json.RawMessage(`{"id":5001, "update":{"quantity":20}}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_item": {"id": 5001}}`, string(res.Data))
|
||||
|
||||
res, err = sg.GraphQL(ctx,
|
||||
`query { line_item(id:$id) { id, price, quantity } }`,
|
||||
json.RawMessage(`{"id":5001}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_item": {"id": 5001, "price": 6.95, "quantity": 20}}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("delete line item", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`mutation { line_item(delete:true, id:$id) { id } }`,
|
||||
json.RawMessage(`{"id":5002}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_item": {"id": 5002}}`, string(res.Data))
|
||||
|
||||
res, err = sg.GraphQL(ctx,
|
||||
`query { line_items { id, price, quantity } }`,
|
||||
json.RawMessage(`{}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_items": [{"id": 5001, "price": 6.95, "quantity": 20}]}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("nested insert", func(t *testing.T) {
|
||||
before(t)
|
||||
res, err := sg.GraphQL(ctx,
|
||||
`mutation { line_items (insert: $line_item) { id, product { name } } }`,
|
||||
json.RawMessage(`{"line_item":
|
||||
{"id":5003, "product": { "connect": { "id": 1} }, "price":10.95, "quantity":15}
|
||||
}`))
|
||||
require.NoError(t, err, res.SQL())
|
||||
require.Equal(t, `{"line_items": [{"id": 5003, "product": {"name": "Charmin Ultra Soft"}}]}`, string(res.Data))
|
||||
})
|
||||
|
||||
t.Run("schema introspection", func(t *testing.T) {
|
||||
before(t)
|
||||
schema, err := sg.GraphQLSchema()
|
||||
require.NoError(t, err)
|
||||
// Uncomment the following line if you need to regenerate the expected schema.
|
||||
//ioutil.WriteFile("../introspection.graphql", []byte(schema), 0644)
|
||||
expected, err := ioutil.ReadFile("../introspection.graphql")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, string(expected), schema)
|
||||
})
|
||||
|
||||
res, err := sg.GraphQL(ctx, introspectionQuery, json.RawMessage(``))
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, string(res.Data),
|
||||
`{"queryType":{"name":"Query"},"mutationType":{"name":"Mutation"},"subscriptionType":null,"types":`)
|
||||
}
|
||||
|
||||
const introspectionQuery = `
|
||||
query IntrospectionQuery {
|
||||
__schema {
|
||||
queryType { name }
|
||||
mutationType { name }
|
||||
subscriptionType { name }
|
||||
types {
|
||||
...FullType
|
||||
}
|
||||
directives {
|
||||
name
|
||||
description
|
||||
locations
|
||||
args {
|
||||
...InputValue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fragment FullType on __Type {
|
||||
kind
|
||||
name
|
||||
description
|
||||
fields(includeDeprecated: true) {
|
||||
name
|
||||
description
|
||||
args {
|
||||
...InputValue
|
||||
}
|
||||
type {
|
||||
...TypeRef
|
||||
}
|
||||
isDeprecated
|
||||
deprecationReason
|
||||
}
|
||||
inputFields {
|
||||
...InputValue
|
||||
}
|
||||
interfaces {
|
||||
...TypeRef
|
||||
}
|
||||
enumValues(includeDeprecated: true) {
|
||||
name
|
||||
description
|
||||
isDeprecated
|
||||
deprecationReason
|
||||
}
|
||||
possibleTypes {
|
||||
...TypeRef
|
||||
}
|
||||
}
|
||||
fragment InputValue on __InputValue {
|
||||
name
|
||||
description
|
||||
type { ...TypeRef }
|
||||
defaultValue
|
||||
}
|
||||
fragment TypeRef on __Type {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
ofType {
|
||||
kind
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
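The integration tests above double as a usage reference for the core package. A hedged sketch of the same calls from an application; the DSN and query are placeholders, and the config is kept minimal like in the tests.

package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(&core.Config{UseAllowList: false}, db)
	if err != nil {
		log.Fatal(err)
	}

	res, err := sg.GraphQL(context.Background(),
		`query { line_items { id, price, quantity } }`,
		json.RawMessage(`{}`))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(res.Data))
}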
319
core/internal/integration_tests/introspection.graphql
Normal file
@ -0,0 +1,319 @@
|
||||
input FloatExpression {
|
||||
contained_in:String!
|
||||
contains:[Float!]!
|
||||
eq:Float!
|
||||
equals:Float!
|
||||
greater_or_equals:Float!
|
||||
greater_than:Float!
|
||||
gt:Float!
|
||||
gte:Float!
|
||||
has_key:Float!
|
||||
has_key_all:[Float!]!
|
||||
has_key_any:[Float!]!
|
||||
ilike:String!
|
||||
in:[Float!]!
|
||||
is_null:Boolean!
|
||||
lesser_or_equals:Float!
|
||||
lesser_than:Float!
|
||||
like:String!
|
||||
lt:Float!
|
||||
lte:Float!
|
||||
neq:Float!
|
||||
nilike:String!
|
||||
nin:[Float!]!
|
||||
nlike:String!
|
||||
not_equals:Float!
|
||||
not_ilike:String!
|
||||
not_in:[Float!]!
|
||||
not_like:String!
|
||||
not_similar:String!
|
||||
nsimilar:String!
|
||||
similar:String!
|
||||
}
|
||||
input IntExpression {
|
||||
contained_in:String!
|
||||
contains:[Int!]!
|
||||
eq:Int!
|
||||
equals:Int!
|
||||
greater_or_equals:Int!
|
||||
greater_than:Int!
|
||||
gt:Int!
|
||||
gte:Int!
|
||||
has_key:Int!
|
||||
has_key_all:[Int!]!
|
||||
has_key_any:[Int!]!
|
||||
ilike:String!
|
||||
in:[Int!]!
|
||||
is_null:Boolean!
|
||||
lesser_or_equals:Int!
|
||||
lesser_than:Int!
|
||||
like:String!
|
||||
lt:Int!
|
||||
lte:Int!
|
||||
neq:Int!
|
||||
nilike:String!
|
||||
nin:[Int!]!
|
||||
nlike:String!
|
||||
not_equals:Int!
|
||||
not_ilike:String!
|
||||
not_in:[Int!]!
|
||||
not_like:String!
|
||||
not_similar:String!
|
||||
nsimilar:String!
|
||||
similar:String!
|
||||
}
|
||||
type Mutation {
|
||||
line_item(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:line_itemInput, update:line_itemInput, upsert:line_itemInput
|
||||
):line_itemOutput
|
||||
line_items(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:line_itemInput, update:line_itemInput, upsert:line_itemInput, inserts:[line_itemInput!]!, updates:[line_itemInput!]!, upserts:[line_itemInput!]!
|
||||
):line_itemOutput
|
||||
product(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:productInput, update:productInput, upsert:productInput
|
||||
):productOutput
|
||||
products(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:productInput, update:productInput, upsert:productInput, inserts:[productInput!]!, updates:[productInput!]!, upserts:[productInput!]!
|
||||
):productOutput
|
||||
user(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:userInput, update:userInput, upsert:userInput
|
||||
):userOutput
|
||||
users(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!, insert:userInput, update:userInput, upsert:userInput, inserts:[userInput!]!, updates:[userInput!]!, upserts:[userInput!]!
|
||||
):userOutput
|
||||
}
|
||||
enum OrderDirection {
|
||||
asc
|
||||
desc
|
||||
}
|
||||
type Query {
|
||||
line_item(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):line_itemOutput
|
||||
line_items(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):[line_itemOutput!]!
|
||||
product(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):productOutput
|
||||
products(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):[productOutput!]!
|
||||
user(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):userOutput
|
||||
users(
|
||||
"To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."
|
||||
order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
|
||||
"Finds the record by the primary key"
|
||||
id:Int!
|
||||
):[userOutput!]!
|
||||
}
|
||||
input StringExpression {
|
||||
contained_in:String!
|
||||
contains:[String!]!
|
||||
eq:String!
|
||||
equals:String!
|
||||
greater_or_equals:String!
|
||||
greater_than:String!
|
||||
gt:String!
|
||||
gte:String!
|
||||
has_key:String!
|
||||
has_key_all:[String!]!
|
||||
has_key_any:[String!]!
|
||||
ilike:String!
|
||||
in:[String!]!
|
||||
is_null:Boolean!
|
||||
lesser_or_equals:String!
|
||||
lesser_than:String!
|
||||
like:String!
|
||||
lt:String!
|
||||
lte:String!
|
||||
neq:String!
|
||||
nilike:String!
|
||||
nin:[String!]!
|
||||
nlike:String!
|
||||
not_equals:String!
|
||||
not_ilike:String!
|
||||
not_in:[String!]!
|
||||
not_like:String!
|
||||
not_similar:String!
|
||||
nsimilar:String!
|
||||
similar:String!
|
||||
}
|
||||
input line_itemExpression {
|
||||
and:line_itemExpression!
|
||||
id:IntExpression!
|
||||
not:line_itemExpression!
|
||||
or:line_itemExpression!
|
||||
price:FloatExpression!
|
||||
product:IntExpression!
|
||||
quantity:IntExpression!
|
||||
}
|
||||
input line_itemInput {
|
||||
id:Int!
|
||||
price:Float
|
||||
product:Int
|
||||
quantity:Int
|
||||
}
|
||||
input line_itemOrderBy {
|
||||
id:OrderDirection!
|
||||
price:OrderDirection!
|
||||
product:OrderDirection!
|
||||
quantity:OrderDirection!
|
||||
}
|
||||
type line_itemOutput {
|
||||
avg_id:Int!
|
||||
avg_price:Float
|
||||
avg_product:Int
|
||||
avg_quantity:Int
|
||||
count_id:Int!
|
||||
count_price:Float
|
||||
count_product:Int
|
||||
count_quantity:Int
|
||||
id:Int!
|
||||
max_id:Int!
|
||||
max_price:Float
|
||||
max_product:Int
|
||||
max_quantity:Int
|
||||
min_id:Int!
|
||||
min_price:Float
|
||||
min_product:Int
|
||||
min_quantity:Int
|
||||
price:Float
|
||||
product:Int
|
||||
quantity:Int
|
||||
stddev_id:Int!
|
||||
stddev_pop_id:Int!
|
||||
stddev_pop_price:Float
|
||||
stddev_pop_product:Int
|
||||
stddev_pop_quantity:Int
|
||||
stddev_price:Float
|
||||
stddev_product:Int
|
||||
stddev_quantity:Int
|
||||
stddev_samp_id:Int!
|
||||
stddev_samp_price:Float
|
||||
stddev_samp_product:Int
|
||||
stddev_samp_quantity:Int
|
||||
var_pop_id:Int!
|
||||
var_pop_price:Float
|
||||
var_pop_product:Int
|
||||
var_pop_quantity:Int
|
||||
var_samp_id:Int!
|
||||
var_samp_price:Float
|
||||
var_samp_product:Int
|
||||
var_samp_quantity:Int
|
||||
variance_id:Int!
|
||||
variance_price:Float
|
||||
variance_product:Int
|
||||
variance_quantity:Int
|
||||
}
|
||||
input productExpression {
|
||||
and:productExpression!
|
||||
id:IntExpression!
|
||||
name:StringExpression!
|
||||
not:productExpression!
|
||||
or:productExpression!
|
||||
weight:FloatExpression!
|
||||
}
|
||||
input productInput {
|
||||
id:Int!
|
||||
name:String
|
||||
weight:Float
|
||||
}
|
||||
input productOrderBy {
|
||||
id:OrderDirection!
|
||||
name:OrderDirection!
|
||||
weight:OrderDirection!
|
||||
}
|
||||
type productOutput {
|
||||
avg_id:Int!
|
||||
avg_weight:Float
|
||||
count_id:Int!
|
||||
count_weight:Float
|
||||
id:Int!
|
||||
max_id:Int!
|
||||
max_weight:Float
|
||||
min_id:Int!
|
||||
min_weight:Float
|
||||
name:String
|
||||
stddev_id:Int!
|
||||
stddev_pop_id:Int!
|
||||
stddev_pop_weight:Float
|
||||
stddev_samp_id:Int!
|
||||
stddev_samp_weight:Float
|
||||
stddev_weight:Float
|
||||
var_pop_id:Int!
|
||||
var_pop_weight:Float
|
||||
var_samp_id:Int!
|
||||
var_samp_weight:Float
|
||||
variance_id:Int!
|
||||
variance_weight:Float
|
||||
weight:Float
|
||||
}
|
||||
input userExpression {
|
||||
and:userExpression!
|
||||
full_name:StringExpression!
|
||||
id:IntExpression!
|
||||
not:userExpression!
|
||||
or:userExpression!
|
||||
}
|
||||
input userInput {
|
||||
full_name:String
|
||||
id:Int!
|
||||
}
|
||||
input userOrderBy {
|
||||
full_name:OrderDirection!
|
||||
id:OrderDirection!
|
||||
}
|
||||
type userOutput {
|
||||
avg_id:Int!
|
||||
count_id:Int!
|
||||
full_name:String
|
||||
id:Int!
|
||||
max_id:Int!
|
||||
min_id:Int!
|
||||
stddev_id:Int!
|
||||
stddev_pop_id:Int!
|
||||
stddev_samp_id:Int!
|
||||
var_pop_id:Int!
|
||||
var_samp_id:Int!
|
||||
variance_id:Int!
|
||||
}
|
||||
schema {
|
||||
mutation: Mutation
|
||||
query: Query
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
package cockraochdb_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
integration_tests "github.com/dosco/super-graph/core/internal/integration_tests"
|
||||
_ "github.com/jackc/pgx/v4/stdlib"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCockroachDB(t *testing.T) {
|
||||
|
||||
url, found := os.LookupEnv("SG_POSTGRESQL_TEST_URL")
|
||||
if !found {
|
||||
t.Skip("set the SG_POSTGRESQL_TEST_URL env variable if you want to run integration tests against a PostgreSQL database")
|
||||
} else {
|
||||
db, err := sql.Open("pgx", url)
|
||||
require.NoError(t, err)
|
||||
|
||||
integration_tests.DropSchema(t, db)
|
||||
integration_tests.SetupSchema(t, db)
|
||||
integration_tests.TestSuperGraph(t, db, func(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
@ -12,8 +12,7 @@ import (
|
||||
func (c *compilerContext) renderBaseColumns(
|
||||
sel *qcode.Select,
|
||||
ti *DBTableInfo,
|
||||
childCols []*qcode.Column,
|
||||
skipped uint32) ([]int, bool, error) {
|
||||
childCols []*qcode.Column) ([]int, bool, error) {
|
||||
|
||||
var realColsRendered []int
|
||||
|
||||
@ -116,12 +115,12 @@ func (c *compilerContext) renderColumnSearchRank(sel *qcode.Select, ti *DBTableI
|
||||
io.WriteString(c.w, `ts_rank(`)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
if c.schema.ver >= 110000 {
|
||||
io.WriteString(c.w, `, websearch_to_tsquery('{{`)
|
||||
io.WriteString(c.w, `, websearch_to_tsquery(`)
|
||||
} else {
|
||||
io.WriteString(c.w, `, to_tsquery('{{`)
|
||||
io.WriteString(c.w, `, to_tsquery(`)
|
||||
}
|
||||
io.WriteString(c.w, arg.Val)
|
||||
io.WriteString(c.w, `}}'))`)
|
||||
c.renderValueExp(Param{Name: arg.Val, Type: "string"})
|
||||
io.WriteString(c.w, `))`)
|
||||
alias(c.w, col.Name)
|
||||
|
||||
return nil
|
||||
@ -141,12 +140,12 @@ func (c *compilerContext) renderColumnSearchHeadline(sel *qcode.Select, ti *DBTa
|
||||
io.WriteString(c.w, `ts_headline(`)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
if c.schema.ver >= 110000 {
|
||||
io.WriteString(c.w, `, websearch_to_tsquery('{{`)
|
||||
io.WriteString(c.w, `, websearch_to_tsquery(`)
|
||||
} else {
|
||||
io.WriteString(c.w, `, to_tsquery('{{`)
|
||||
io.WriteString(c.w, `, to_tsquery(`)
|
||||
}
|
||||
io.WriteString(c.w, arg.Val)
|
||||
io.WriteString(c.w, `}}'))`)
|
||||
c.renderValueExp(Param{Name: arg.Val, Type: "string"})
|
||||
io.WriteString(c.w, `))`)
|
||||
alias(c.w, col.Name)
|
||||
|
||||
return nil
|
||||
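The search rank and headline columns are now rendered with the search term as a bound parameter (websearch_to_tsquery on PostgreSQL 11+, to_tsquery otherwise) instead of a '{{...}}' literal spliced into the SQL. A hedged illustration of the resulting parameterized full-text search; the table and column names are assumptions based on the test schema.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib"
)

func searchProducts(ctx context.Context, db *sql.DB, term string) error {
	// The search term is passed as $1; the statement text never changes.
	rows, err := db.QueryContext(ctx,
		`SELECT id, name, ts_rank(to_tsvector(name), websearch_to_tsquery($1)) AS rank
		   FROM product
		  WHERE to_tsvector(name) @@ websearch_to_tsquery($1)
		  ORDER BY rank DESC`, term)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var (
			id   int
			name string
			rank float64
		)
		if err := rows.Scan(&id, &name, &rank); err != nil {
			return err
		}
		log.Println(id, name, rank)
	}
	return rows.Err()
}

func main() {
	db, err := sql.Open("pgx", "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	if err := searchProducts(context.Background(), db, "hand sanitizer"); err != nil {
		log.Fatal(err)
	}
}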
@ -167,7 +166,7 @@ func (c *compilerContext) renderColumnTypename(sel *qcode.Select, ti *DBTableInf
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderColumnFunction(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
|
||||
pl := funcPrefixLen(col.Name)
|
||||
pl := funcPrefixLen(c.schema.fm, col.Name)
|
||||
// if pl == 0 {
|
||||
// //fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
|
||||
// io.WriteString(c.w, `'`)
|
||||
|
@ -4,17 +4,18 @@ package psql
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
)
|
||||
|
||||
var (
|
||||
qcompileTest, _ = qcode.NewCompiler(qcode.Config{})
|
||||
|
||||
schema = getTestSchema()
|
||||
schema, _ = GetTestSchema()
|
||||
|
||||
vars = NewVariables(map[string]string{
|
||||
vars = map[string]string{
|
||||
"admin_account_id": "5",
|
||||
})
|
||||
}
|
||||
|
||||
pcompileTest = NewCompiler(Config{
|
||||
Schema: schema,
|
||||
@ -24,6 +25,37 @@ var (
|
||||
|
||||
// FuzzerEntrypoint for Fuzzbuzz
|
||||
func Fuzz(data []byte) int {
|
||||
err1 := query(data)
|
||||
err2 := insert(data)
|
||||
err3 := update(data)
|
||||
err4 := delete(data)
|
||||
|
||||
if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
func query(data []byte) error {
|
||||
gql := data
|
||||
|
||||
qc, err1 := qcompileTest.Compile(gql, "user")
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(data),
|
||||
}
|
||||
|
||||
_, _, err2 := pcompileTest.CompileEx(qc, vars)
|
||||
|
||||
if err1 != nil {
|
||||
return err1
|
||||
} else {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
|
||||
func insert(data []byte) error {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
@ -46,9 +78,57 @@ func Fuzz(data []byte) int {
|
||||
}
|
||||
|
||||
_, _, err = pcompileTest.CompileEx(qc, vars)
|
||||
return err
|
||||
}
|
||||
|
||||
func update(data []byte) error {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
qc, err := qcompileTest.Compile([]byte(gql), "user")
|
||||
if err != nil {
|
||||
return 0
|
||||
panic("qcompile can't fail")
|
||||
}
|
||||
|
||||
return 1
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(data),
|
||||
}
|
||||
|
||||
_, _, err = pcompileTest.CompileEx(qc, vars)
|
||||
return err
|
||||
}
|
||||
|
||||
func delete(data []byte) error {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
qc, err := qcompileTest.Compile([]byte(gql), "user")
|
||||
if err != nil {
|
||||
panic("qcompile can't fail")
|
||||
}
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(data),
|
||||
}
|
||||
|
||||
_, _, err = pcompileTest.CompileEx(qc, vars)
|
||||
return err
|
||||
}
|
||||
|
20
core/internal/psql/fuzz_test.go
Normal file
@ -0,0 +1,20 @@
|
||||
// +build gofuzz
|
||||
|
||||
package psql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var ret int
|
||||
|
||||
func TestFuzzCrashers(t *testing.T) {
|
||||
var crashers = []string{
|
||||
"{\"connect\":{}}",
|
||||
"q(q{q{q{q{q{q{q{q{",
|
||||
}
|
||||
|
||||
for _, f := range crashers {
|
||||
ret = Fuzz([]byte(f))
|
||||
}
|
||||
}
|
@ -10,8 +10,8 @@ import (
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
)
|
||||
|
||||
func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
|
||||
vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
func (c *compilerContext) renderInsert(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
insert, ok := vars[qc.ActionVar]
|
||||
if !ok {
|
||||
@ -21,9 +21,16 @@ func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
|
||||
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
|
||||
io.WriteString(c.w, qc.ActionVar)
|
||||
io.WriteString(c.w, `}}' :: json AS j)`)
|
||||
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT `)
|
||||
if insert[0] == '[' {
|
||||
io.WriteString(c.w, `json_array_elements(`)
|
||||
}
|
||||
c.renderValueExp(Param{Name: qc.ActionVar, Type: "json"})
|
||||
io.WriteString(c.w, ` :: json`)
|
||||
if insert[0] == '[' {
|
||||
io.WriteString(c.w, `)`)
|
||||
}
|
||||
io.WriteString(c.w, ` AS j)`)
|
||||
|
||||
st := util.NewStack()
|
||||
st.Push(kvitem{_type: itemInsert, key: ti.Name, val: insert, ti: ti})
|
||||
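renderInsert now selects the action variable as a bound JSON parameter, wrapping it in json_array_elements when the payload is an array, and (as the later hunks show) extracts each column with CAST(i.j ->> 'col' AS type). A hedged illustration of the resulting statement shape, using the product table from the integration tests; the DSN and payload are placeholders.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// The whole payload arrives as one JSON parameter; json_array_elements
	// expands it into rows and each column is cast out of the JSON object.
	_, err = db.ExecContext(context.Background(), `
		WITH "_sg_input" AS (SELECT json_array_elements($1 :: json) AS j)
		INSERT INTO product (id, name, weight)
		SELECT CAST(i.j ->> 'id' AS integer),
		       CAST(i.j ->> 'name' AS text),
		       CAST(i.j ->> 'weight' AS float)
		  FROM "_sg_input" i`,
		`[{"id":4, "name":"Paper Towels", "weight":0.4}]`)
	if err != nil {
		log.Fatal(err)
	}
}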
@ -82,34 +89,17 @@ func (c *compilerContext) renderInsertStmt(qc *qcode.QCode, w io.Writer, item re
|
||||
io.WriteString(w, `INSERT INTO `)
|
||||
quoted(w, ti.Name)
|
||||
io.WriteString(w, ` (`)
|
||||
renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, false)
|
||||
renderNestedInsertRelColumns(w, item.kvitem, false)
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
io.WriteString(w, ` SELECT `)
|
||||
renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, true)
|
||||
renderNestedInsertRelColumns(w, item.kvitem, true)
|
||||
|
||||
io.WriteString(w, ` FROM "_sg_input" i, `)
|
||||
io.WriteString(w, ` FROM "_sg_input" i`)
|
||||
renderNestedInsertRelTables(w, item.kvitem)
|
||||
|
||||
if item.array {
|
||||
io.WriteString(w, `json_populate_recordset`)
|
||||
} else {
|
||||
io.WriteString(w, `json_populate_record`)
|
||||
}
|
||||
|
||||
io.WriteString(w, `(NULL::`)
|
||||
io.WriteString(w, ti.Name)
|
||||
|
||||
if len(item.path) == 0 {
|
||||
io.WriteString(w, `, i.j) t RETURNING *)`)
|
||||
} else {
|
||||
io.WriteString(w, `, i.j->`)
|
||||
joinPath(w, item.path)
|
||||
io.WriteString(w, `) t RETURNING *)`)
|
||||
}
|
||||
|
||||
io.WriteString(w, ` RETURNING *)`)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -172,21 +162,21 @@ func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
|
||||
func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
|
||||
if len(item.items) == 0 {
|
||||
if item.relPC != nil && item.relPC.Type == RelOneToMany {
|
||||
quoted(w, item.relPC.Left.Table)
|
||||
io.WriteString(w, `, `)
|
||||
quoted(w, item.relPC.Left.Table)
|
||||
}
|
||||
} else {
|
||||
// Render tables needed to set values if child-to-parent
|
||||
// relationship is one-to-many
|
||||
for _, v := range item.items {
|
||||
if v.relCP.Type == RelOneToMany {
|
||||
io.WriteString(w, `, `)
|
||||
if v._ctype > 0 {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `", `)
|
||||
io.WriteString(w, `"`)
|
||||
} else {
|
||||
quoted(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `, `)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
@ -6,10 +6,11 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
@ -33,42 +34,44 @@ var updateTypes = map[string]itemType{
|
||||
|
||||
var noLimit = qcode.Paging{NoLimit: true}
|
||||
|
||||
func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
|
||||
func (co *Compiler) compileMutation(w io.Writer, qc *qcode.QCode, vars Variables) (Metadata, error) {
|
||||
md := Metadata{}
|
||||
|
||||
if len(qc.Selects) == 0 {
|
||||
return 0, errors.New("empty query")
|
||||
return md, errors.New("empty query")
|
||||
}
|
||||
|
||||
c := &compilerContext{w, qc.Selects, co}
|
||||
c := &compilerContext{md, w, qc.Selects, co}
|
||||
root := &qc.Selects[0]
|
||||
|
||||
ti, err := c.schema.GetTable(root.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
switch qc.Type {
|
||||
case qcode.QTInsert:
|
||||
if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
|
||||
return 0, err
|
||||
if _, err := c.renderInsert(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTUpdate:
|
||||
if _, err := c.renderUpdate(qc, w, vars, ti); err != nil {
|
||||
return 0, err
|
||||
if _, err := c.renderUpdate(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTUpsert:
|
||||
if _, err := c.renderUpsert(qc, w, vars, ti); err != nil {
|
||||
return 0, err
|
||||
if _, err := c.renderUpsert(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTDelete:
|
||||
if _, err := c.renderDelete(qc, w, vars, ti); err != nil {
|
||||
return 0, err
|
||||
if _, err := c.renderDelete(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
|
||||
return c.md, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
|
||||
}
|
||||
|
||||
root.Paging = noLimit
|
||||
@ -77,7 +80,7 @@ func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables
|
||||
root.Where = nil
|
||||
root.Args = nil
|
||||
|
||||
return c.compileQuery(qc, w, vars)
|
||||
return co.compileQueryWithMetadata(w, qc, vars, c.md)
|
||||
}
|
||||
|
||||
type kvitem struct {
|
||||
@ -365,12 +368,12 @@ func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderInsertUpdateColumns(w io.Writer,
|
||||
func (c *compilerContext) renderInsertUpdateColumns(
|
||||
qc *qcode.QCode,
|
||||
jt map[string]json.RawMessage,
|
||||
ti *DBTableInfo,
|
||||
skipcols map[string]struct{},
|
||||
values bool) (uint32, error) {
|
||||
isValues bool) (uint32, error) {
|
||||
|
||||
root := &qc.Selects[0]
|
||||
renderedCol := false
|
||||
@ -392,13 +395,18 @@ func renderInsertUpdateColumns(w io.Writer,
|
||||
}
|
||||
}
|
||||
if n != 0 {
|
||||
io.WriteString(w, `, `)
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
if values {
|
||||
colWithTable(w, "t", cn.Name)
|
||||
if isValues {
|
||||
io.WriteString(c.w, `CAST( i.j ->>`)
|
||||
io.WriteString(c.w, `'`)
|
||||
io.WriteString(c.w, cn.Name)
|
||||
io.WriteString(c.w, `' AS `)
|
||||
io.WriteString(c.w, cn.Type)
|
||||
io.WriteString(c.w, `)`)
|
||||
} else {
|
||||
quoted(w, cn.Name)
|
||||
quoted(c.w, cn.Name)
|
||||
}
|
||||
|
||||
if !renderedCol {
|
||||
@ -417,16 +425,28 @@ func renderInsertUpdateColumns(w io.Writer,
|
||||
continue
|
||||
}
|
||||
if i != 0 || n != 0 {
|
||||
io.WriteString(w, `, `)
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
if values {
|
||||
io.WriteString(w, `'`)
|
||||
io.WriteString(w, root.PresetMap[cn])
|
||||
io.WriteString(w, `' :: `)
|
||||
io.WriteString(w, col.Type)
|
||||
if isValues {
|
||||
val := root.PresetMap[cn]
|
||||
switch {
|
||||
case ok && len(val) > 1 && val[0] == '$':
|
||||
c.renderValueExp(Param{Name: val[1:], Type: col.Type})
|
||||
|
||||
case ok && strings.HasPrefix(val, "sql:"):
|
||||
io.WriteString(c.w, `(`)
|
||||
c.renderVar(val[4:], c.renderValueExp)
|
||||
io.WriteString(c.w, `)`)
|
||||
|
||||
case ok:
|
||||
squoted(c.w, val)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` :: `)
|
||||
io.WriteString(c.w, col.Type)
|
||||
} else {
|
||||
quoted(w, cn)
|
||||
quoted(c.w, cn)
|
||||
}
|
||||
|
||||
if !renderedCol {
|
||||
@ -435,15 +455,15 @@ func renderInsertUpdateColumns(w io.Writer,
|
||||
}
|
||||
|
||||
if len(skipcols) != 0 && renderedCol {
|
||||
io.WriteString(w, `, `)
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
|
||||
vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
root := &qc.Selects[0]
|
||||
func (c *compilerContext) renderUpsert(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
root := &qc.Selects[0]
|
||||
upsert, ok := vars[qc.ActionVar]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
|
||||
@ -461,7 +481,7 @@ func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
|
||||
if _, err := c.renderInsert(w, qc, vars, ti); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@ -522,6 +542,10 @@ func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
|
||||
|
||||
rel := item.relPC
|
||||
|
||||
if rel == nil {
|
||||
return errors.New("invalid connect value")
|
||||
}
|
||||
|
||||
// Render only for parent-to-child relationship of one-to-one
|
||||
// For this to work the child needs to be found first so its primary key
|
||||
// can be set in the related column on the parent object.
|
||||
@ -667,7 +691,7 @@ func renderCteName(w io.Writer, item kvitem) error {
|
||||
io.WriteString(w, item.ti.Name)
|
||||
if item._type == itemConnect || item._type == itemDisconnect {
|
||||
io.WriteString(w, `_`)
|
||||
int2string(w, item.id)
|
||||
int32String(w, item.id)
|
||||
}
|
||||
io.WriteString(w, `"`)
|
||||
return nil
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@ -72,7 +72,7 @@ func delete(t *testing.T) {
|
||||
// }
|
||||
// }`
|
||||
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '$1' :: json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
// vars := map[string]json.RawMessage{
|
||||
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
|
||||
@ -97,7 +97,7 @@ func delete(t *testing.T) {
|
||||
// }
|
||||
// }`
|
||||
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '$1' :: json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
// vars := map[string]json.RawMessage{
|
||||
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
)
|
||||
|
||||
@ -19,7 +20,7 @@ const (
|
||||
|
||||
var (
|
||||
qcompile *qcode.Compiler
|
||||
pcompile *Compiler
|
||||
pcompile *psql.Compiler
|
||||
expected map[string][]string
|
||||
)
|
||||
|
||||
@ -133,13 +134,16 @@ func TestMain(m *testing.M) {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
schema := getTestSchema()
|
||||
schema, err := psql.GetTestSchema()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
vars := NewVariables(map[string]string{
|
||||
vars := map[string]string{
|
||||
"admin_account_id": "5",
|
||||
})
|
||||
}
|
||||
|
||||
pcompile = NewCompiler(Config{
|
||||
pcompile = psql.NewCompiler(psql.Config{
|
||||
Schema: schema,
|
||||
Vars: vars,
|
||||
})
|
||||
@ -173,7 +177,7 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
|
||||
func compileGQLToPSQL(t *testing.T, gql string, vars psql.Variables, role string) {
|
||||
generateTestFile := false
|
||||
|
||||
if generateTestFile {
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
@ -17,9 +18,24 @@ const (
|
||||
closeBlock = 500
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAllTablesSkipped = errors.New("all tables skipped. cannot render query")
|
||||
)
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
IsArray bool
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
Skipped uint32
|
||||
Params []Param
|
||||
pindex map[string]int
|
||||
}
|
||||
|
||||
type compilerContext struct {
|
||||
md Metadata
|
||||
w io.Writer
|
||||
s []qcode.Select
|
||||
*Compiler
|
||||
}
|
||||
|
||||
type Variables map[string]json.RawMessage
|
||||
|
||||
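The new Metadata and Param types record, in order, which named variables the compiled SQL refers to, so callers can build the positional argument list that goes with the $1, $2, ... placeholders. A hypothetical helper sketching that mapping; the types are redefined locally so the example stands alone, and this is not the project's actual argList implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// Param mirrors the compiler's Param type shown above.
type Param struct {
	Name    string
	Type    string
	IsArray bool
}

// buildArgs turns the ordered parameter list into positional query arguments.
func buildArgs(params []Param, vars map[string]json.RawMessage) ([]interface{}, error) {
	args := make([]interface{}, 0, len(params))
	for i, p := range params {
		v, ok := vars[p.Name]
		if !ok {
			return nil, fmt.Errorf("variable '%s' not supplied for $%d", p.Name, i+1)
		}
		args = append(args, []byte(v)) // the i-th param becomes $i+1 in the statement
	}
	return args, nil
}

func main() {
	params := []Param{{Name: "id", Type: "integer"}}
	args, err := buildArgs(params, map[string]json.RawMessage{"id": json.RawMessage(`5001`)})
	fmt.Println(args, err)
}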
@ -40,12 +56,12 @@ func NewCompiler(conf Config) *Compiler {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
|
||||
return c.schema.SetRel(child, parent, rel)
|
||||
func (co *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
|
||||
return co.schema.SetRel(child, parent, rel)
|
||||
}
|
||||
|
||||
func (c *Compiler) IDColumn(table string) (*DBColumn, error) {
|
||||
ti, err := c.schema.GetTable(table)
|
||||
func (co *Compiler) IDColumn(table string) (*DBColumn, error) {
|
||||
ti, err := co.schema.GetTable(table)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -57,65 +73,75 @@ func (c *Compiler) IDColumn(table string) (*DBColumn, error) {
|
||||
return ti.PrimaryCol, nil
|
||||
}
|
||||
|
||||
type compilerContext struct {
|
||||
w io.Writer
|
||||
s []qcode.Select
|
||||
*Compiler
|
||||
}
|
||||
|
||||
func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (uint32, []byte, error) {
|
||||
func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (Metadata, []byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
skipped, err := co.Compile(qc, w, vars)
|
||||
return skipped, w.Bytes(), err
|
||||
metad, err := co.Compile(w, qc, vars)
|
||||
return metad, w.Bytes(), err
|
||||
}
|
||||
|
||||
func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
|
||||
func (co *Compiler) Compile(w io.Writer, qc *qcode.QCode, vars Variables) (Metadata, error) {
|
||||
if qc == nil {
|
||||
return Metadata{}, fmt.Errorf("qcode is nil")
|
||||
}
|
||||
|
||||
switch qc.Type {
|
||||
case qcode.QTQuery:
|
||||
return co.compileQuery(qc, w, vars)
|
||||
case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
|
||||
return co.compileMutation(qc, w, vars)
|
||||
return co.compileQuery(w, qc, vars)
|
||||
|
||||
case qcode.QTInsert,
|
||||
qcode.QTUpdate,
|
||||
qcode.QTDelete,
|
||||
qcode.QTUpsert:
|
||||
return co.compileMutation(w, qc, vars)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
|
||||
return Metadata{}, fmt.Errorf("Unknown operation type %d", qc.Type)
|
||||
}
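
The public compiler API changes in this hunk: Compile now takes the output writer first and returns a Metadata value instead of the old skipped-columns bitset, and Metadata.Params records the positional parameters ($1, $2, ...) in the order they were assigned, reusing an index when the same variable name appears twice. A minimal sketch of driving the new signature (illustrative only; the internal import paths resolve only inside the super-graph module, and qc is assumed to be an already compiled query):

package example

import (
	"bytes"

	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/core/internal/qcode"
)

// renderSQL compiles a parsed query to SQL and returns the statement together
// with the ordered positional parameters recorded in Metadata.Params.
func renderSQL(pc *psql.Compiler, qc *qcode.QCode, vars psql.Variables) (string, []psql.Param, error) {
	w := &bytes.Buffer{}
	md, err := pc.Compile(w, qc, vars) // writer first, Metadata instead of uint32
	if err != nil {
		return "", nil, err
	}
	return w.String(), md.Params, nil
}
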
func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
|
||||
func (co *Compiler) compileQuery(w io.Writer, qc *qcode.QCode, vars Variables) (Metadata, error) {
|
||||
return co.compileQueryWithMetadata(w, qc, vars, Metadata{})
|
||||
}
|
||||
|
||||
func (co *Compiler) compileQueryWithMetadata(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, md Metadata) (Metadata, error) {
|
||||
|
||||
if len(qc.Selects) == 0 {
|
||||
return 0, errors.New("empty query")
|
||||
return md, errors.New("empty query")
|
||||
}
|
||||
|
||||
c := &compilerContext{w, qc.Selects, co}
|
||||
|
||||
c := &compilerContext{md, w, qc.Selects, co}
|
||||
st := NewIntStack()
|
||||
i := 0
|
||||
|
||||
io.WriteString(c.w, `SELECT jsonb_build_object(`)
|
||||
for _, id := range qc.Roots {
|
||||
root := &qc.Selects[id]
|
||||
if root.SkipRender || len(root.Cols) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
st.Push(root.ID + closeBlock)
|
||||
st.Push(root.ID)
|
||||
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
root := &qc.Selects[id]
|
||||
|
||||
if root.SkipRender || len(root.Cols) == 0 {
|
||||
squoted(c.w, root.FieldName)
|
||||
io.WriteString(c.w, `, `)
|
||||
io.WriteString(c.w, `NULL`)
|
||||
|
||||
} else {
|
||||
st.Push(root.ID + closeBlock)
|
||||
st.Push(root.ID)
|
||||
c.renderRootSelect(root)
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
if st.Len() != 0 {
|
||||
io.WriteString(c.w, `) as "__root" FROM `)
|
||||
|
||||
if i == 0 {
|
||||
return 0, ErrAllTablesSkipped
|
||||
} else {
|
||||
io.WriteString(c.w, `) as "__root"`)
|
||||
return c.md, nil
|
||||
}
|
||||
|
||||
var ignored uint32
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
@ -132,7 +158,7 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
|
||||
|
||||
ti, err := c.schema.GetTable(sel.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
if sel.ParentID == -1 {
|
||||
@ -141,18 +167,16 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
|
||||
c.renderLateralJoin(sel)
|
||||
}
|
||||
|
||||
if !ti.Singular {
|
||||
if !ti.IsSingular {
|
||||
c.renderPluralSelect(sel, ti)
|
||||
}
|
||||
|
||||
skipped, err := c.renderSelect(sel, ti, vars)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
if err := c.renderSelect(sel, ti, vars); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
ignored |= skipped
|
||||
|
||||
for _, cid := range sel.Children {
|
||||
if hasBit(skipped, uint32(cid)) {
|
||||
if hasBit(c.md.Skipped, uint32(cid)) {
|
||||
continue
|
||||
}
|
||||
child := &c.s[cid]
|
||||
@ -169,7 +193,7 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
|
||||
|
||||
ti, err := c.schema.GetTable(sel.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `)`)
|
||||
@ -178,7 +202,7 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, "__sj", sel.ID)
|
||||
|
||||
if !ti.Singular {
|
||||
if !ti.IsSingular {
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, "__sj", sel.ID)
|
||||
}
|
||||
@ -201,12 +225,12 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
|
||||
}
|
||||
}
|
||||
|
||||
return ignored, nil
|
||||
return c.md, nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderPluralSelect(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
io.WriteString(c.w, `SELECT coalesce(jsonb_agg("__sj_`)
|
||||
int2string(c.w, sel.ID)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."json"), '[]') as "json"`)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
@ -230,7 +254,7 @@ func (c *compilerContext) renderPluralSelect(sel *qcode.Select, ti *DBTableInfo)
|
||||
io.WriteString(c.w, `, CONCAT_WS(','`)
|
||||
for i := 0; i < n; i++ {
|
||||
io.WriteString(c.w, `, max("__cur_`)
|
||||
int2string(c.w, int32(i))
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `")`)
|
||||
}
|
||||
io.WriteString(c.w, `) as "cursor"`)
|
||||
@ -246,7 +270,7 @@ func (c *compilerContext) renderRootSelect(sel *qcode.Select) error {
|
||||
io.WriteString(c.w, `', `)
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int2string(c.w, sel.ID)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."json"`)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
@ -255,16 +279,14 @@ func (c *compilerContext) renderRootSelect(sel *qcode.Select) error {
|
||||
io.WriteString(c.w, `_cursor', `)
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int2string(c.w, sel.ID)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."cursor"`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, []*qcode.Column, error) {
|
||||
var skipped uint32
|
||||
|
||||
func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) ([]*qcode.Column, error) {
|
||||
cols := make([]*qcode.Column, 0, len(sel.Cols))
|
||||
colmap := make(map[string]struct{}, len(sel.Cols))
|
||||
|
||||
@ -306,9 +328,7 @@ func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Va
|
||||
|
||||
rel, err := c.schema.GetRel(child.Name, ti.Name)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
//skipped |= (1 << uint(id))
|
||||
//continue
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch rel.Type {
|
||||
@ -334,16 +354,15 @@ func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Va
|
||||
if _, ok := colmap[rel.Left.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
|
||||
colmap[rel.Left.Col] = struct{}{}
|
||||
skipped |= (1 << uint(id))
|
||||
c.md.Skipped |= (1 << uint(id))
|
||||
}
|
||||
|
||||
default:
|
||||
return 0, nil, fmt.Errorf("unknown relationship %s", rel)
|
||||
//skipped |= (1 << uint(id))
|
||||
return nil, fmt.Errorf("unknown relationship %s", rel)
|
||||
}
|
||||
}
|
||||
|
||||
return skipped, cols, nil
|
||||
return cols, nil
|
||||
}
|
||||
|
||||
// This
|
||||
@ -412,7 +431,7 @@ func (c *compilerContext) addSeekPredicate(sel *qcode.Select) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, error) {
|
||||
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) error {
|
||||
var rel *DBRel
|
||||
var err error
|
||||
|
||||
@ -421,13 +440,13 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
|
||||
rel, err = c.schema.GetRel(ti.Name, parent.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
skipped, childCols, err := c.initSelect(sel, ti, vars)
|
||||
childCols, err := c.initSelect(sel, ti, vars)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
|
||||
// SELECT
|
||||
@ -437,13 +456,13 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
// }
|
||||
|
||||
io.WriteString(c.w, `SELECT to_jsonb("__sr_`)
|
||||
int2string(c.w, sel.ID)
|
||||
io.WriteString(c.w, `") `)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `".*) `)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
for i := range sel.OrderBy {
|
||||
io.WriteString(c.w, `- '__cur_`)
|
||||
int2string(c.w, int32(i))
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `' `)
|
||||
}
|
||||
}
|
||||
@ -453,15 +472,15 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
for i := range sel.OrderBy {
|
||||
io.WriteString(c.w, `, "__cur_`)
|
||||
int2string(c.w, int32(i))
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `"`)
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `FROM (SELECT `)
|
||||
|
||||
if err := c.renderColumns(sel, ti, skipped); err != nil {
|
||||
return 0, err
|
||||
if err := c.renderColumns(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
@ -469,7 +488,7 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
io.WriteString(c.w, `, LAST_VALUE(`)
|
||||
colWithTableID(c.w, ti.Name, sel.ID, ob.Col)
|
||||
io.WriteString(c.w, `) OVER() AS "__cur_`)
|
||||
int2string(c.w, int32(i))
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `"`)
|
||||
}
|
||||
}
|
||||
@ -477,9 +496,8 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
io.WriteString(c.w, ` FROM (`)
|
||||
|
||||
// FROM (SELECT .... )
|
||||
err = c.renderBaseSelect(sel, ti, rel, childCols, skipped)
|
||||
if err != nil {
|
||||
return skipped, err
|
||||
if err = c.renderBaseSelect(sel, ti, rel, childCols); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
|
||||
@ -488,7 +506,7 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars
|
||||
|
||||
// END-FROM
|
||||
|
||||
return skipped, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderLateralJoin(sel *qcode.Select) error {
|
||||
@ -538,12 +556,12 @@ func (c *compilerContext) renderJoinByName(table, parent string, id int32) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
|
||||
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
i := 0
|
||||
var cn string
|
||||
|
||||
for _, col := range sel.Cols {
|
||||
if n := funcPrefixLen(col.Name); n != 0 {
|
||||
if n := funcPrefixLen(c.schema.fm, col.Name); n != 0 {
|
||||
if !sel.Functions {
|
||||
continue
|
||||
}
|
||||
@ -574,7 +592,7 @@ func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo, skip
|
||||
|
||||
i += c.renderRemoteRelColumns(sel, ti, i)
|
||||
|
||||
return c.renderJoinColumns(sel, ti, skipped, i)
|
||||
return c.renderJoinColumns(sel, ti, i)
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) int {
|
||||
@ -599,12 +617,12 @@ func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableI
|
||||
return i
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32, colsRendered int) error {
|
||||
func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) error {
|
||||
// columns previously rendered
|
||||
i := colsRendered
|
||||
|
||||
for _, id := range sel.Children {
|
||||
if hasBit(skipped, uint32(id)) {
|
||||
if hasBit(c.md.Skipped, uint32(id)) {
|
||||
continue
|
||||
}
|
||||
childSel := &c.s[id]
|
||||
@ -620,13 +638,13 @@ func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo,
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int2string(c.w, childSel.ID)
|
||||
int32String(c.w, childSel.ID)
|
||||
io.WriteString(c.w, `"."json"`)
|
||||
alias(c.w, childSel.FieldName)
|
||||
|
||||
if childSel.Paging.Type != qcode.PtOffset {
|
||||
io.WriteString(c.w, `, "__sj_`)
|
||||
int2string(c.w, childSel.ID)
|
||||
int32String(c.w, childSel.ID)
|
||||
io.WriteString(c.w, `"."cursor" AS "`)
|
||||
io.WriteString(c.w, childSel.FieldName)
|
||||
io.WriteString(c.w, `_cursor"`)
|
||||
@ -639,7 +657,7 @@ func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo,
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
|
||||
childCols []*qcode.Column, skipped uint32) error {
|
||||
childCols []*qcode.Column) error {
|
||||
isRoot := (rel == nil)
|
||||
isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
|
||||
hasOrder := len(sel.OrderBy) != 0
|
||||
@ -654,7 +672,7 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, r
|
||||
c.renderDistinctOn(sel, ti)
|
||||
}
|
||||
|
||||
realColsRendered, isAgg, err := c.renderBaseColumns(sel, ti, childCols, skipped)
|
||||
realColsRendered, isAgg, err := c.renderBaseColumns(sel, ti, childCols)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -706,7 +724,7 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, r
|
||||
}
|
||||
|
||||
switch {
|
||||
case ti.Singular:
|
||||
case ti.IsSingular:
|
||||
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
|
||||
|
||||
case len(sel.Paging.Limit) != 0:
|
||||
@ -781,11 +799,13 @@ func (c *compilerContext) renderCursorCTE(sel *qcode.Select) error {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
io.WriteString(c.w, `a[`)
|
||||
int2string(c.w, int32(i+1))
|
||||
int32String(c.w, int32(i+1))
|
||||
io.WriteString(c.w, `] as `)
|
||||
quoted(c.w, ob.Col)
|
||||
}
|
||||
io.WriteString(c.w, ` FROM string_to_array('{{cursor}}', ',') as a) `)
|
||||
io.WriteString(c.w, ` FROM string_to_array(`)
|
||||
c.renderValueExp(Param{Name: "cursor", Type: "json"})
|
||||
io.WriteString(c.w, `, ',') as a) `)
|
||||
return nil
|
||||
}
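
The cursor is no longer spliced in as a '{{cursor}}' template; renderCursorCTE now registers a parameter named "cursor" (type json) through renderValueExp, so it surfaces in Metadata.Params like any other variable. A sketch of binding it on the caller side (assumes md and an args slice as in the renderSQL sketch above; cursorFromLastPage is a placeholder for the opaque cursor returned by the previous page):

// Bind the cursor at whatever position it was assigned during compilation.
args := make([]interface{}, len(md.Params))
for i, p := range md.Params {
	if p.Name == "cursor" {
		args[i] = cursorFromLastPage
	}
}
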
@ -921,8 +941,6 @@ func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested b
|
||||
st.Push('(')
|
||||
|
||||
case qcode.OpNot:
|
||||
//fmt.Printf("1> %s %d %s %s\n", val.Op, len(val.Children), val.Children[0].Op, val.Children[1].Op)
|
||||
|
||||
st.Push(val.Children[0])
|
||||
st.Push(qcode.OpNot)
|
||||
|
||||
@ -1028,9 +1046,9 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
|
||||
case qcode.OpLesserThan:
|
||||
io.WriteString(c.w, `<`)
|
||||
case qcode.OpIn:
|
||||
io.WriteString(c.w, `IN`)
|
||||
io.WriteString(c.w, `= ANY`)
|
||||
case qcode.OpNotIn:
|
||||
io.WriteString(c.w, `NOT IN`)
|
||||
io.WriteString(c.w, `!= ANY`)
|
||||
case qcode.OpLike:
|
||||
io.WriteString(c.w, `LIKE`)
|
||||
case qcode.OpNotLike:
|
||||
@ -1080,12 +1098,13 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
|
||||
io.WriteString(c.w, `((`)
|
||||
colWithTable(c.w, ti.Name, ti.TSVCol.Name)
|
||||
if c.schema.ver >= 110000 {
|
||||
io.WriteString(c.w, `) @@ websearch_to_tsquery('{{`)
|
||||
io.WriteString(c.w, `) @@ websearch_to_tsquery(`)
|
||||
} else {
|
||||
io.WriteString(c.w, `) @@ to_tsquery('{{`)
|
||||
io.WriteString(c.w, `) @@ to_tsquery(`)
|
||||
}
|
||||
io.WriteString(c.w, ex.Val)
|
||||
io.WriteString(c.w, `}}'))`)
|
||||
c.renderValueExp(Param{Name: ex.Val, Type: "string"})
|
||||
io.WriteString(c.w, `))`)
|
||||
|
||||
return nil
|
||||
|
||||
default:
|
||||
@ -1171,15 +1190,25 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *
|
||||
val, ok := vars[ex.Val]
|
||||
switch {
|
||||
case ok && strings.HasPrefix(val, "sql:"):
|
||||
io.WriteString(c.w, ` (`)
|
||||
io.WriteString(c.w, val[4:])
|
||||
io.WriteString(c.w, `(`)
|
||||
c.renderVar(val[4:], c.renderValueExp)
|
||||
io.WriteString(c.w, `)`)
|
||||
|
||||
case ok:
|
||||
squoted(c.w, val)
|
||||
|
||||
case ex.Op == qcode.OpIn || ex.Op == qcode.OpNotIn:
|
||||
io.WriteString(c.w, `(ARRAY(SELECT json_array_elements_text(`)
|
||||
c.renderValueExp(Param{Name: ex.Val, Type: col.Type, IsArray: true})
|
||||
io.WriteString(c.w, `))`)
|
||||
|
||||
io.WriteString(c.w, ` :: `)
|
||||
io.WriteString(c.w, col.Type)
|
||||
io.WriteString(c.w, `[])`)
|
||||
return
|
||||
|
||||
default:
|
||||
io.WriteString(c.w, ` '{{`)
|
||||
io.WriteString(c.w, ex.Val)
|
||||
io.WriteString(c.w, `}}'`)
|
||||
c.renderValueExp(Param{Name: ex.Val, Type: col.Type, IsArray: false})
|
||||
}
|
||||
|
||||
case qcode.ValRef:
|
||||
@ -1193,7 +1222,55 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *
|
||||
io.WriteString(c.w, col.Type)
|
||||
}
|
||||
|
||||
func funcPrefixLen(fn string) int {
|
||||
func (c *compilerContext) renderValueExp(p Param) {
|
||||
io.WriteString(c.w, `$`)
|
||||
if v, ok := c.md.pindex[p.Name]; ok {
|
||||
int32String(c.w, int32(v))
|
||||
|
||||
} else {
|
||||
c.md.Params = append(c.md.Params, p)
|
||||
n := len(c.md.Params)
|
||||
|
||||
if c.md.pindex == nil {
|
||||
c.md.pindex = make(map[string]int)
|
||||
}
|
||||
c.md.pindex[p.Name] = n
|
||||
int32String(c.w, int32(n))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderVar(vv string, fn func(Param)) {
|
||||
f, s := -1, 0
|
||||
|
||||
for i := range vv {
|
||||
v := vv[i]
|
||||
switch {
|
||||
case (i > 0 && vv[i-1] != '\\' && v == '$') || v == '$':
|
||||
if (i - s) > 0 {
|
||||
io.WriteString(c.w, vv[s:i])
|
||||
}
|
||||
f = i
|
||||
|
||||
case (v < 'a' && v > 'z') &&
|
||||
(v < 'A' && v > 'Z') &&
|
||||
(v < '0' && v > '9') &&
|
||||
v != '_' &&
|
||||
f != -1 &&
|
||||
(i-f) > 1:
|
||||
fn(Param{Name: vv[f+1 : i]})
|
||||
s = i
|
||||
f = -1
|
||||
}
|
||||
}
|
||||
|
||||
if f != -1 && (len(vv)-f) > 1 {
|
||||
fn(Param{Name: vv[f+1:]})
|
||||
} else {
|
||||
io.WriteString(c.w, vv[s:])
|
||||
}
|
||||
}
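
renderVar scans a sql:-prefixed variable body for $name references and hands each one to renderValueExp, so even embedded SQL fragments end up using positional parameters. An in-package sketch of the effect (illustrative; the variable reference sits at the end of the fragment):

// buf receives the rewritten fragment; the $role reference becomes $1 and
// "role" is appended to the compiler context's Metadata.Params.
buf := &bytes.Buffer{}
c := &compilerContext{w: buf}
c.renderVar(`select id from users where role = $role`, c.renderValueExp)
fmt.Println(buf.String())        // select id from users where role = $1
fmt.Println(c.md.Params[0].Name) // role
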
func funcPrefixLen(fm map[string]*DBFunction, fn string) int {
|
||||
switch {
|
||||
case strings.HasPrefix(fn, "avg_"):
|
||||
return 4
|
||||
@ -1218,6 +1295,14 @@ func funcPrefixLen(fn string) int {
|
||||
case strings.HasPrefix(fn, "var_samp_"):
|
||||
return 9
|
||||
}
|
||||
fnLen := len(fn)
|
||||
|
||||
for k := range fm {
|
||||
kLen := len(k)
|
||||
if kLen < fnLen && k[0] == fn[0] && strings.HasPrefix(fn, k) && fn[kLen] == '_' {
|
||||
return kLen + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
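
funcPrefixLen now also consults the schema's function map, so a single-argument SQL function discovered at startup works as a column prefix just like the built-in aggregates. A small in-package sketch, assuming a hypothetical database function named latest:

// "latest_price" splits into the function "latest" and the column "price",
// the same way "max_price" or "avg_price" are handled.
fm := map[string]*DBFunction{"latest": {Name: "latest"}}
n := funcPrefixLen(fm, "latest_price") // 7, i.e. len("latest_")
col := "latest_price"[n:]              // "price"
_ = col
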
@ -1236,7 +1321,7 @@ func aliasWithID(w io.Writer, alias string, id int32) {
|
||||
io.WriteString(w, ` AS "`)
|
||||
io.WriteString(w, alias)
|
||||
io.WriteString(w, `_`)
|
||||
int2string(w, id)
|
||||
int32String(w, id)
|
||||
io.WriteString(w, `"`)
|
||||
}
|
||||
|
||||
@ -1253,7 +1338,7 @@ func colWithTableID(w io.Writer, table string, id int32, col string) {
|
||||
io.WriteString(w, table)
|
||||
if id >= 0 {
|
||||
io.WriteString(w, `_`)
|
||||
int2string(w, id)
|
||||
int32String(w, id)
|
||||
}
|
||||
io.WriteString(w, `"."`)
|
||||
io.WriteString(w, col)
|
||||
@ -1272,26 +1357,6 @@ func squoted(w io.Writer, identifier string) {
|
||||
io.WriteString(w, `'`)
|
||||
}
|
||||
|
||||
const charset = "0123456789"
|
||||
|
||||
func int2string(w io.Writer, val int32) {
|
||||
if val < 10 {
|
||||
w.Write([]byte{charset[val]})
|
||||
return
|
||||
}
|
||||
|
||||
temp := int32(0)
|
||||
val2 := val
|
||||
for val2 > 0 {
|
||||
temp *= 10
|
||||
temp += val2 % 10
|
||||
val2 = int32(float64(val2 / 10))
|
||||
}
|
||||
|
||||
val3 := temp
|
||||
for val3 > 0 {
|
||||
d := val3 % 10
|
||||
val3 /= 10
|
||||
w.Write([]byte{charset[d]})
|
||||
}
|
||||
func int32String(w io.Writer, val int32) {
|
||||
io.WriteString(w, strconv.FormatInt(int64(val), 10))
|
||||
}
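
Swapping the hand-rolled int2string for strconv.FormatInt also fixes a correctness issue: the old digit-reversal loop dropped trailing zeros, so a value such as 20 was written as "2". A quick in-package check of the new helper (sketch):

var b bytes.Buffer
int32String(&b, 20)
fmt.Println(b.String()) // "20" (the removed int2string produced "2" for this input)
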
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -32,6 +32,20 @@ func withComplexArgs(t *testing.T) {
|
||||
compileGQLToPSQL(t, gql, nil, "user")
|
||||
}
|
||||
|
||||
func withWhereIn(t *testing.T) {
|
||||
gql := `query {
|
||||
products(where: { id: { in: $list } }) {
|
||||
id
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"list": json.RawMessage(`[1,2,3]`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
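
Together with the renderOp change above (in/nin now compile to = ANY / != ANY over a JSON-unpacked array), the whole $list value is bound as a single JSON parameter rather than being expanded into one placeholder per element; the generated predicate has roughly the shape (("products"."id") = ANY (ARRAY(SELECT json_array_elements_text($1)) :: bigint[])) (approximate, not copied from a fixture). A sketch of executing such a statement through database/sql (sqlStr and db are assumptions: the compiler output and an open *sql.DB):

// The list travels as one JSON-encoded argument.
list, _ := json.Marshal([]int{1, 2, 3})
rows, err := db.Query(sqlStr, list)
if err != nil {
	log.Fatal(err)
}
defer rows.Close()
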
func withWhereAndList(t *testing.T) {
|
||||
gql := `query {
|
||||
products(
|
||||
@ -367,6 +381,7 @@ func blockedFunctions(t *testing.T) {
|
||||
|
||||
func TestCompileQuery(t *testing.T) {
|
||||
t.Run("withComplexArgs", withComplexArgs)
|
||||
t.Run("withWhereIn", withWhereIn)
|
||||
t.Run("withWhereAndList", withWhereAndList)
|
||||
t.Run("withWhereIsNull", withWhereIsNull)
|
||||
t.Run("withWhereMultiOr", withWhereMultiOr)
|
||||
@ -429,7 +444,7 @@ func BenchmarkCompile(b *testing.B) {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = pcompile.Compile(qc, w, nil)
|
||||
_, err = pcompile.Compile(w, qc, nil)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -450,7 +465,7 @@ func BenchmarkCompileParallel(b *testing.B) {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = pcompile.Compile(qc, w, nil)
|
||||
_, err = pcompile.Compile(w, qc, nil)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -11,17 +11,20 @@ type DBSchema struct {
|
||||
ver int
|
||||
t map[string]*DBTableInfo
|
||||
rm map[string]map[string]*DBRel
|
||||
fm map[string]*DBFunction
|
||||
}
|
||||
|
||||
type DBTableInfo struct {
|
||||
Name string
|
||||
Type string
|
||||
Singular bool
|
||||
IsSingular bool
|
||||
Columns []DBColumn
|
||||
PrimaryCol *DBColumn
|
||||
TSVCol *DBColumn
|
||||
ColMap map[string]*DBColumn
|
||||
ColIDMap map[int16]*DBColumn
|
||||
Singular string
|
||||
Plural string
|
||||
}
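
The boolean Singular field becomes IsSingular, while Singular and Plural now hold the inflected table names, so a lookup by either form returns an entry that knows its counterpart. A sketch against the test schema's users table (illustrative):

ti, err := schema.GetTable("users")
if err != nil {
	log.Fatal(err)
}
fmt.Println(ti.IsSingular, ti.Singular, ti.Plural) // false user users
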
type RelType int
|
||||
@ -54,8 +57,10 @@ type DBRel struct {
|
||||
|
||||
func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
|
||||
schema := &DBSchema{
|
||||
ver: info.Version,
|
||||
t: make(map[string]*DBTableInfo),
|
||||
rm: make(map[string]map[string]*DBRel),
|
||||
fm: make(map[string]*DBFunction, len(info.Functions)),
|
||||
}
|
||||
|
||||
for i, t := range info.Tables {
|
||||
@ -79,6 +84,12 @@ func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
|
||||
}
|
||||
}
|
||||
|
||||
for k, f := range info.Functions {
|
||||
if len(f.Params) == 1 {
|
||||
schema.fm[strings.ToLower(f.Name)] = &info.Functions[k]
|
||||
}
|
||||
}
|
||||
|
||||
return schema, nil
|
||||
}
|
||||
|
||||
@ -89,23 +100,28 @@ func (s *DBSchema) addTable(
|
||||
colidmap := make(map[int16]*DBColumn, len(cols))
|
||||
|
||||
singular := flect.Singularize(t.Key)
|
||||
plural := flect.Pluralize(t.Key)
|
||||
|
||||
s.t[singular] = &DBTableInfo{
|
||||
Name: t.Name,
|
||||
Type: t.Type,
|
||||
Singular: true,
|
||||
IsSingular: true,
|
||||
Columns: cols,
|
||||
ColMap: colmap,
|
||||
ColIDMap: colidmap,
|
||||
Singular: singular,
|
||||
Plural: plural,
|
||||
}
|
||||
|
||||
plural := flect.Pluralize(t.Key)
|
||||
s.t[plural] = &DBTableInfo{
|
||||
Name: t.Name,
|
||||
Type: t.Type,
|
||||
Singular: false,
|
||||
IsSingular: false,
|
||||
Columns: cols,
|
||||
ColMap: colmap,
|
||||
ColIDMap: colidmap,
|
||||
Singular: singular,
|
||||
Plural: plural,
|
||||
}
|
||||
|
||||
if al, ok := aliases[t.Key]; ok {
|
||||
@ -364,6 +380,14 @@ func (s *DBSchema) updateSchemaOTMT(
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *DBSchema) GetTableNames() []string {
|
||||
var names []string
|
||||
for name := range s.t {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
|
||||
t, ok := s.t[table]
|
||||
if !ok {
|
||||
@ -424,3 +448,11 @@ func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
|
||||
}
|
||||
return rel, nil
|
||||
}
|
||||
|
||||
func (s *DBSchema) GetFunctions() []*DBFunction {
|
||||
var funcs []*DBFunction
|
||||
for _, f := range s.fm {
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
return funcs
|
||||
}
|
||||
|
@ -13,10 +13,11 @@ type DBInfo struct {
|
||||
Version int
|
||||
Tables []DBTable
|
||||
Columns [][]DBColumn
|
||||
colmap map[string]map[string]*DBColumn
|
||||
Functions []DBFunction
|
||||
colMap map[string]map[string]*DBColumn
|
||||
}
|
||||
|
||||
func GetDBInfo(db *sql.DB) (*DBInfo, error) {
|
||||
func GetDBInfo(db *sql.DB, schema string) (*DBInfo, error) {
|
||||
di := &DBInfo{}
|
||||
var version string
|
||||
|
||||
@ -30,46 +31,61 @@ func GetDBInfo(db *sql.DB) (*DBInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
di.Tables, err = GetTables(db)
|
||||
di.Tables, err = GetTables(db, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
di.colmap = make(map[string]map[string]*DBColumn, len(di.Tables))
|
||||
|
||||
for i, t := range di.Tables {
|
||||
cols, err := GetColumns(db, "public", t.Name)
|
||||
for _, t := range di.Tables {
|
||||
cols, err := GetColumns(db, schema, t.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
di.Columns = append(di.Columns, cols)
|
||||
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
|
||||
for n, c := range di.Columns[i] {
|
||||
di.colmap[t.Key][c.Key] = &di.Columns[i][n]
|
||||
}
|
||||
|
||||
di.colMap = newColMap(di.Tables, di.Columns)
|
||||
|
||||
di.Functions, err = GetFunctions(db, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return di, nil
|
||||
}
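
GetDBInfo is now scoped to a single Postgres schema and also collects function metadata, which NewDBSchema then folds into its function map. A sketch of the discovery-to-schema pipeline under the new signatures (db is an assumed open *sql.DB; no table aliases configured):

info, err := psql.GetDBInfo(db, "public")
if err != nil {
	log.Fatal(err)
}
schema, err := psql.NewDBSchema(info, nil)
if err != nil {
	log.Fatal(err)
}
_ = schema
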
func newColMap(tables []DBTable, columns [][]DBColumn) map[string]map[string]*DBColumn {
|
||||
cm := make(map[string]map[string]*DBColumn, len(tables))
|
||||
|
||||
for i, t := range tables {
|
||||
cols := columns[i]
|
||||
cm[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
|
||||
for n, c := range cols {
|
||||
cm[t.Key][c.Key] = &columns[i][n]
|
||||
}
|
||||
}
|
||||
|
||||
return cm
|
||||
}
|
||||
|
||||
func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
|
||||
t.ID = di.Tables[len(di.Tables)-1].ID
|
||||
|
||||
di.Tables = append(di.Tables, t)
|
||||
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
di.colMap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
|
||||
for i := range cols {
|
||||
cols[i].ID = int16(i)
|
||||
c := &cols[i]
|
||||
di.colmap[t.Key][c.Key] = c
|
||||
di.colMap[t.Key][c.Key] = c
|
||||
}
|
||||
di.Columns = append(di.Columns, cols)
|
||||
}
|
||||
|
||||
func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
|
||||
v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
|
||||
v, ok := di.colMap[strings.ToLower(table)][strings.ToLower(column)]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
@ -80,7 +96,7 @@ type DBTable struct {
|
||||
Type string
|
||||
}
|
||||
|
||||
func GetTables(db *sql.DB) ([]DBTable, error) {
|
||||
func GetTables(db *sql.DB, schema string) ([]DBTable, error) {
|
||||
sqlStmt := `
|
||||
SELECT
|
||||
c.relname as "name",
|
||||
@ -92,14 +108,12 @@ SELECT
|
||||
FROM pg_catalog.pg_class c
|
||||
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
|
||||
WHERE c.relkind IN ('r','v','m','f','')
|
||||
AND n.nspname <> ('pg_catalog')
|
||||
AND n.nspname <> ('information_schema')
|
||||
AND n.nspname !~ ('^pg_toast')
|
||||
AND pg_catalog.pg_table_is_visible(c.oid);`
|
||||
AND n.nspname = $1
|
||||
AND pg_catalog.pg_table_is_visible(c.oid);`
|
||||
|
||||
var tables []DBTable
|
||||
|
||||
rows, err := db.Query(sqlStmt)
|
||||
rows, err := db.Query(sqlStmt, schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error fetching tables: %s", err)
|
||||
}
|
||||
@ -237,6 +251,71 @@ ORDER BY id;`
|
||||
return cols, nil
|
||||
}
|
||||
|
||||
type DBFunction struct {
|
||||
Name string
|
||||
Params []DBFuncParam
|
||||
}
|
||||
|
||||
type DBFuncParam struct {
|
||||
ID int
|
||||
Name sql.NullString
|
||||
Type string
|
||||
}
|
||||
|
||||
func GetFunctions(db *sql.DB, schema string) ([]DBFunction, error) {
|
||||
sqlStmt := `
|
||||
SELECT
|
||||
routines.routine_name,
|
||||
parameters.specific_name,
|
||||
parameters.data_type,
|
||||
parameters.parameter_name,
|
||||
parameters.ordinal_position
|
||||
FROM
|
||||
information_schema.routines
|
||||
RIGHT JOIN
|
||||
information_schema.parameters
|
||||
ON (routines.specific_name = parameters.specific_name and parameters.ordinal_position IS NOT NULL)
|
||||
WHERE
|
||||
routines.specific_schema = $1
|
||||
ORDER BY
|
||||
routines.routine_name, parameters.ordinal_position;`
|
||||
|
||||
rows, err := db.Query(sqlStmt, schema)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error fetching functions: %s", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var funcs []DBFunction
|
||||
fm := make(map[string]int)
|
||||
|
||||
parameterIndex := 1
|
||||
for rows.Next() {
|
||||
var fn, fid string
|
||||
fp := DBFuncParam{}
|
||||
|
||||
err = rows.Scan(&fn, &fid, &fp.Type, &fp.Name, &fp.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !fp.Name.Valid {
|
||||
fp.Name.String = string(parameterIndex)
|
||||
fp.Name.Valid = true
|
||||
}
|
||||
|
||||
if i, ok := fm[fid]; ok {
|
||||
funcs[i].Params = append(funcs[i].Params, fp)
|
||||
} else {
|
||||
funcs = append(funcs, DBFunction{Name: fn, Params: []DBFuncParam{fp}})
|
||||
fm[fid] = len(funcs) - 1
|
||||
}
|
||||
parameterIndex++
|
||||
}
|
||||
|
||||
return funcs, nil
|
||||
}
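
GetFunctions populates the new Functions field on DBInfo; NewDBSchema keeps only the single-parameter entries in its function map, which is what funcPrefixLen consults above. A sketch of calling the discovery step directly (db is an assumed open *sql.DB):

funcs, err := psql.GetFunctions(db, "public") // scoped to one schema, like GetTables and GetColumns
if err != nil {
	log.Fatal(err)
}
for _, f := range funcs {
	fmt.Printf("%s takes %d parameter(s)\n", f.Name, len(f.Params))
}
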
// func GetValType(type string) qcode.ValType {
|
||||
// switch {
|
||||
// case "bigint", "integer", "smallint", "numeric", "bigserial":
|
||||
|
@ -1,11 +1,10 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getTestSchema() *DBSchema {
|
||||
func GetTestDBInfo() *DBInfo {
|
||||
tables := []DBTable{
|
||||
DBTable{Name: "customers", Type: "table"},
|
||||
DBTable{Name: "users", Type: "table"},
|
||||
@ -74,36 +73,19 @@ func getTestSchema() *DBSchema {
|
||||
}
|
||||
}
|
||||
|
||||
schema := &DBSchema{
|
||||
ver: 110000,
|
||||
t: make(map[string]*DBTableInfo),
|
||||
rm: make(map[string]map[string]*DBRel),
|
||||
return &DBInfo{
|
||||
Version: 110000,
|
||||
Tables: tables,
|
||||
Columns: columns,
|
||||
Functions: []DBFunction{},
|
||||
colMap: newColMap(tables, columns),
|
||||
}
|
||||
}
|
||||
|
||||
func GetTestSchema() (*DBSchema, error) {
|
||||
aliases := map[string][]string{
|
||||
"users": []string{"mes"},
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.addTable(t, columns[i], aliases)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.firstDegreeRels(t, columns[i])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.secondDegreeRels(t, columns[i])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return schema
|
||||
return NewDBSchema(GetTestDBInfo(), aliases)
|
||||
}
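
The test helpers are now exported (GetTestDBInfo, GetTestSchema) and build the schema through NewDBSchema instead of wiring relationships by hand, which is what allows the tests to move to the external psql_test package. A sketch of the new test setup, mirroring the TestMain change earlier in this diff:

schema, err := psql.GetTestSchema()
if err != nil {
	log.Fatal(err)
}
pc := psql.NewCompiler(psql.Config{
	Schema: schema,
	Vars:   map[string]string{"admin_account_id": "5"},
})
_ = pc
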
@ -1,25 +1,25 @@
|
||||
=== RUN TestCompileInsert
|
||||
=== RUN TestCompileInsert/simpleInsert
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/singleInsert
|
||||
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT "t"."name", "t"."description", "t"."price", "t"."user_id" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'user_id' AS bigint) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/bulkInsert
|
||||
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/simpleInsertWithPresets
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, '{{user_id}}' :: bigint FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, $2 :: bigint FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertManyToMany
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone), "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers" RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone), "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products" RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertOneToMany
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j->'product') t RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "users"."id" FROM "_sg_input" i, "users" RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertOneToOne
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "users"."id" FROM "_sg_input" i, "users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertOneToManyWithConnect
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnect
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user", "__sj_2"."json" AS "tags" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "_x_users"."id" FROM "_sg_input" i, "_x_users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user", "__sj_2"."json" AS "tags" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnectArray
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "_x_users"."id" FROM "_sg_input" i, "_x_users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileInsert (0.02s)
--- PASS: TestCompileInsert/simpleInsert (0.00s)
--- PASS: TestCompileInsert/singleInsert (0.00s)
@@ -33,13 +33,13 @@ WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id"
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnectArray (0.00s)
=== RUN TestCompileMutate
=== RUN TestCompileMutate/singleUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/singleUpsertWhere
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > '3' :: numeric(7,2)) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > '3' :: numeric(7,2)) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/bulkUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/delete
WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '1' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '1' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileMutate (0.01s)
--- PASS: TestCompileMutate/singleUpsert (0.00s)
--- PASS: TestCompileMutate/singleUpsertWhere (0.00s)
@@ -47,55 +47,58 @@ WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :
--- PASS: TestCompileMutate/delete (0.00s)
=== RUN TestCompileQuery
=== RUN TestCompileQuery/withComplexArgs
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT DISTINCT ON ("products"."price") "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."id") < '28' :: bigint) AND (("products"."id") >= '20' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) ORDER BY "products"."price" DESC LIMIT ('30') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT DISTINCT ON ("products"."price") "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."id") < '28' :: bigint) AND (("products"."id") >= '20' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) ORDER BY "products"."price" DESC LIMIT ('30') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIn
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = ANY (ARRAY(SELECT json_array_elements_text($1)) :: bigint[])))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereAndList
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIsNull
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereMultiOr
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND ((("products"."price") < '20' :: numeric(7,2)) OR (("products"."price") > '10' :: numeric(7,2)) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND ((("products"."price") < '20' :: numeric(7,2)) OR (("products"."price") > '10' :: numeric(7,2)) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/fetchByID
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '{{id}}' :: bigint))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = $1 :: bigint))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/searchQuery
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery('{{query}}')) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery('{{query}}')) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery('{{query}}'))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery($1)) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery($1)) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery($1))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToMany
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email", "__sj_1"."json" AS "products" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."email" AS "email", "__sj_1"."json" AS "products" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyReverse
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "__sj_1"."json" AS "users" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."email" AS "email" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "__sj_1"."json" AS "users" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."email" AS "email" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyArray
SELECT jsonb_build_object('tags', "__sj_0"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "__sj_3"."json" AS "tags" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3") AS "json"FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "tags_0"."name" AS "name", "__sj_1"."json" AS "product" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('tags', "__sj_0"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "__sj_3"."json" AS "tags" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "tags_0"."name" AS "name", "__sj_1"."json" AS "product" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToMany
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "__sj_1"."json" AS "customers" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "__sj_1"."json" AS "customers" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToManyReverse
SELECT jsonb_build_object('customers', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "__sj_1"."json" AS "products" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('customers', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "__sj_1"."json" AS "products" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunction
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionBlockedByCol
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionDisabled
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionWithFilter
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") > '10' :: bigint))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") > '10' :: bigint))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/syntheticTables
SELECT jsonb_build_object('me', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
SELECT jsonb_build_object('me', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = $1 :: bigint)) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/queryWithVariables
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") = '{{product_price}}' :: numeric(7,2)) AND (("products"."id") = '{{product_id}}' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") = $1 :: numeric(7,2)) AND (("products"."id") = $2 :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereOnRelations
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/multiRoot
SELECT jsonb_build_object('customer', "__sj_0"."json", 'user', "__sj_1"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "__sj_3"."json" AS "customers", "__sj_4"."json" AS "customer" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_4") AS "json"FROM (SELECT "customers_4"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('1') :: integer) AS "customers_4") AS "__sr_4") AS "__sj_4" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3") AS "json"FROM (SELECT "customers_3"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1", (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "customers_0"."id" AS "id" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0") AS "__sr_0") AS "__sj_0"
SELECT jsonb_build_object('customer', "__sj_0"."json", 'user', "__sj_1"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "__sj_3"."json" AS "customers", "__sj_4"."json" AS "customer" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_4".*) AS "json"FROM (SELECT "customers_4"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('1') :: integer) AS "customers_4") AS "__sr_4") AS "__sj_4" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "customers_3"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1", (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "customers_0"."id" AS "id" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/jsonColumnAsTable
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "tag_count" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "tag_count_1"."count" AS "count", "__sj_2"."json" AS "tags" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "tags_2"."name" AS "name" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "tag_count" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "tag_count_1"."count" AS "count", "__sj_2"."json" AS "tags" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "tags_2"."name" AS "name" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withCursor
SELECT jsonb_build_object('products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor" FROM (SELECT to_jsonb("__sr_0") - '__cur_0' - '__cur_1' AS "json", "__cur_0", "__cur_1"FROM (SELECT "products_0"."name" AS "name", LAST_VALUE("products_0"."price") OVER() AS "__cur_0", LAST_VALUE("products_0"."id") OVER() AS "__cur_1" FROM (WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array('{{cursor}}', ',') as a) SELECT "products"."name", "products"."id", "products"."price" FROM "products", "__cur" WHERE (((("products"."price") < "__cur"."price" :: numeric(7,2)) OR ((("products"."price") = "__cur"."price" :: numeric(7,2)) AND (("products"."id") > "__cur"."id" :: bigint)))) ORDER BY "products"."price" DESC, "products"."id" ASC LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor" FROM (SELECT to_jsonb("__sr_0".*) - '__cur_0' - '__cur_1' AS "json", "__cur_0", "__cur_1"FROM (SELECT "products_0"."name" AS "name", LAST_VALUE("products_0"."price") OVER() AS "__cur_0", LAST_VALUE("products_0"."id") OVER() AS "__cur_1" FROM (WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array($1, ',') as a) SELECT "products"."name", "products"."id", "products"."price" FROM "products", "__cur" WHERE (((("products"."price") < "__cur"."price" :: numeric(7,2)) OR ((("products"."price") = "__cur"."price" :: numeric(7,2)) AND (("products"."id") > "__cur"."id" :: bigint)))) ORDER BY "products"."price" DESC, "products"."id" ASC LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/nullForAuthRequiredInAnon
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", NULL AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", NULL AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedQuery
SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedFunctions
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email" FROM (SELECT , "users"."email" FROM "users" WHERE (false) GROUP BY "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
--- PASS: TestCompileQuery (0.02s)
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."email" AS "email" FROM (SELECT , "users"."email" FROM "users" WHERE (false) GROUP BY "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
--- PASS: TestCompileQuery (0.03s)
--- PASS: TestCompileQuery/withComplexArgs (0.00s)
--- PASS: TestCompileQuery/withWhereIn (0.00s)
--- PASS: TestCompileQuery/withWhereAndList (0.00s)
--- PASS: TestCompileQuery/withWhereIsNull (0.00s)
--- PASS: TestCompileQuery/withWhereMultiOr (0.00s)
@@ -121,23 +124,23 @@ SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coa
--- PASS: TestCompileQuery/blockedFunctions (0.00s)
=== RUN TestCompileUpdate
=== RUN TestCompileUpdate/singleUpdate
WITH "_sg_input" AS (SELECT '{{update}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE ((("products"."id") = '1' :: bigint) AND (("products"."id") = '{{id}}' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i) WHERE ((("products"."id") = '1' :: bigint) AND (("products"."id") = $2 :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/simpleUpdateWithPresets
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") = '{{user_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), 'now' :: timestamp without time zone FROM "_sg_input" i) WHERE (("products"."user_id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateManyToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("purchases"."id") = $2 :: bigint) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("purchases"."id") = $2 :: bigint) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '8' :: bigint) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("users"."id") = '8' :: bigint) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileUpdate/nestedUpdateOneToOne
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileUpdate/nestedUpdateOneToManyWithConnect
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '{{id}}' :: bigint) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("users"."id") = $2 :: bigint) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithConnect
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
|
||||
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithDisconnect
|
||||
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
|
||||
--- PASS: TestCompileUpdate (0.02s)
|
||||
--- PASS: TestCompileUpdate/singleUpdate (0.00s)
|
||||
--- PASS: TestCompileUpdate/simpleUpdateWithPresets (0.00s)
|
||||
@ -148,4 +151,4 @@ WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FR
|
||||
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithConnect (0.00s)
|
||||
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithDisconnect (0.00s)
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/psql 0.320s
|
||||
ok github.com/dosco/super-graph/core/internal/psql (cached)
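The expected SQL above now takes ordinal placeholders ($1 :: json for the input payload, $2 :: bigint for the id) instead of the old '{{data}}' template tags, so a compiled statement can be executed with ordinary query arguments. A minimal Go sketch, assuming a local Postgres database and the lib/pq driver (both stand-ins for illustration, not part of this change):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // hypothetical driver choice for this sketch
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/example_db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The JSON payload and the row id are now plain positional arguments,
	// matching the $1 / $2 placeholders the compiler emits above.
	data := `{"sale_type": "bought", "quantity": 5, "due_date": "2020-05-01T00:00:00Z"}`
	const q = `WITH "_sg_input" AS (SELECT $1 :: json AS j)
	           SELECT i.j->>'sale_type' FROM "_sg_input" i WHERE $2 :: bigint > 0`

	var saleType string
	if err := db.QueryRow(q, data, 5).Scan(&saleType); err != nil {
		log.Fatal(err)
	}
	fmt.Println(saleType) // "bought"
}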
|
||||
|
@ -10,8 +10,8 @@ import (
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
)
|
||||
|
||||
func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
|
||||
vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
func (c *compilerContext) renderUpdate(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
update, ok := vars[qc.ActionVar]
|
||||
if !ok {
|
||||
@ -21,9 +21,10 @@ func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
|
||||
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
|
||||
io.WriteString(c.w, qc.ActionVar)
|
||||
io.WriteString(c.w, `}}' :: json AS j)`)
|
||||
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT `)
|
||||
c.renderValueExp(Param{Name: qc.ActionVar, Type: "json"})
|
||||
// io.WriteString(c.w, qc.ActionVar)
|
||||
io.WriteString(c.w, ` :: json AS j)`)
|
||||
|
||||
st := util.NewStack()
|
||||
st.Push(kvitem{_type: itemUpdate, key: ti.Name, val: update, ti: ti})
|
||||
@ -84,32 +85,16 @@ func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item re
|
||||
io.WriteString(w, `UPDATE `)
|
||||
quoted(w, ti.Name)
|
||||
io.WriteString(w, ` SET (`)
|
||||
renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, false)
|
||||
renderNestedUpdateRelColumns(w, item.kvitem, false)
|
||||
|
||||
io.WriteString(w, `) = (SELECT `)
|
||||
renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, true)
|
||||
renderNestedUpdateRelColumns(w, item.kvitem, true)
|
||||
|
||||
io.WriteString(w, ` FROM "_sg_input" i, `)
|
||||
io.WriteString(w, ` FROM "_sg_input" i`)
|
||||
renderNestedUpdateRelTables(w, item.kvitem)
|
||||
|
||||
if item.array {
|
||||
io.WriteString(w, `json_populate_recordset`)
|
||||
} else {
|
||||
io.WriteString(w, `json_populate_record`)
|
||||
}
|
||||
|
||||
io.WriteString(w, `(NULL::`)
|
||||
io.WriteString(w, ti.Name)
|
||||
|
||||
if len(item.path) == 0 {
|
||||
io.WriteString(w, `, i.j) t)`)
|
||||
} else {
|
||||
io.WriteString(w, `, i.j->`)
|
||||
joinPath(w, item.path)
|
||||
io.WriteString(w, `) t) `)
|
||||
}
|
||||
io.WriteString(w, `) `)
|
||||
|
||||
if item.id != 0 {
|
||||
// Render sql to set id values if child-to-parent
|
||||
@ -137,11 +122,13 @@ func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item re
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
} else {
|
||||
io.WriteString(w, ` WHERE `)
|
||||
if qc.Selects[0].Where != nil {
|
||||
io.WriteString(w, `WHERE `)
|
||||
if err := c.renderWhere(&qc.Selects[0], ti); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(w, ` RETURNING `)
|
||||
quoted(w, ti.Name)
|
||||
@ -202,17 +189,18 @@ func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
|
||||
// relationship is one-to-many
|
||||
for _, v := range item.items {
|
||||
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, `, "_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `", `)
|
||||
io.WriteString(w, `"`)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderDelete(qc *qcode.QCode, w io.Writer,
|
||||
vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
func (c *compilerContext) renderDelete(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
root := &qc.Selects[0]
|
||||
|
||||
io.WriteString(c.w, `WITH `)
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@ -223,7 +223,7 @@ func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
|
||||
// }
|
||||
// }`
|
||||
|
||||
// sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
// sql := `WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
// vars := map[string]json.RawMessage{
|
||||
// "data": json.RawMessage(`{
|
||||
|
@ -1,13 +0,0 @@
|
||||
package psql
|
||||
|
||||
import "regexp"
|
||||
|
||||
func NewVariables(varlist map[string]string) map[string]string {
|
||||
re := regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
|
||||
vars := make(map[string]string, len(varlist))
|
||||
|
||||
for k, v := range varlist {
|
||||
vars[k] = re.ReplaceAllString(v, `{{$1}}`)
|
||||
}
|
||||
return vars
|
||||
}
|
11
core/internal/qcode/bench.10
Normal file
@ -0,0 +1,11 @@
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core/internal/qcode
|
||||
BenchmarkQCompile-16 120888 9236 ns/op 3755 B/op 28 allocs/op
|
||||
BenchmarkQCompileP-16 502248 2620 ns/op 3795 B/op 28 allocs/op
|
||||
BenchmarkParse-16 128370 9294 ns/op 3902 B/op 18 allocs/op
|
||||
BenchmarkParseP-16 575752 2340 ns/op 3903 B/op 18 allocs/op
|
||||
BenchmarkSchemaParse-16 212048 5779 ns/op 3968 B/op 57 allocs/op
|
||||
BenchmarkSchemaParseP-16 630918 1686 ns/op 3968 B/op 57 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/qcode 7.710s
|
17
core/internal/qcode/bench.9
Normal file
@ -0,0 +1,17 @@
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core/internal/qcode
|
||||
BenchmarkQCompile
|
||||
BenchmarkQCompile-16 129614 8649 ns/op 3756 B/op 28 allocs/op
|
||||
BenchmarkQCompileP
|
||||
BenchmarkQCompileP-16 487488 2525 ns/op 3792 B/op 28 allocs/op
|
||||
BenchmarkParse
|
||||
BenchmarkParse-16 127582 8731 ns/op 3902 B/op 18 allocs/op
|
||||
BenchmarkParseP
|
||||
BenchmarkParseP-16 561373 2223 ns/op 3903 B/op 18 allocs/op
|
||||
BenchmarkSchemaParse
|
||||
BenchmarkSchemaParse-16 209142 5523 ns/op 3968 B/op 57 allocs/op
|
||||
BenchmarkSchemaParseP
|
||||
BenchmarkSchemaParseP-16 716437 1734 ns/op 3968 B/op 57 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/qcode 8.483s
|
@ -1,12 +1,12 @@
|
||||
package qcode
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
DefaultBlock bool
|
||||
Blocklist []string
|
||||
}
|
||||
|
||||
@ -15,23 +15,27 @@ type QueryConfig struct {
|
||||
Filters []string
|
||||
Columns []string
|
||||
DisableFunctions bool
|
||||
Block bool
|
||||
}
|
||||
|
||||
type InsertConfig struct {
|
||||
Filters []string
|
||||
Columns []string
|
||||
Presets map[string]string
|
||||
Block bool
|
||||
}
|
||||
|
||||
type UpdateConfig struct {
|
||||
Filters []string
|
||||
Columns []string
|
||||
Presets map[string]string
|
||||
Block bool
|
||||
}
|
||||
|
||||
type DeleteConfig struct {
|
||||
Filters []string
|
||||
Columns []string
|
||||
Block bool
|
||||
}
|
||||
|
||||
type TRConfig struct {
|
||||
@ -47,9 +51,8 @@ type trval struct {
|
||||
fil *Exp
|
||||
filNU bool
|
||||
cols map[string]struct{}
|
||||
disable struct {
|
||||
funcs bool
|
||||
}
|
||||
disable struct{ funcs bool }
|
||||
block bool
|
||||
}
|
||||
|
||||
insert struct {
|
||||
@ -58,6 +61,7 @@ type trval struct {
|
||||
cols map[string]struct{}
|
||||
psmap map[string]string
|
||||
pslist []string
|
||||
block bool
|
||||
}
|
||||
|
||||
update struct {
|
||||
@ -66,12 +70,14 @@ type trval struct {
|
||||
cols map[string]struct{}
|
||||
psmap map[string]string
|
||||
pslist []string
|
||||
block bool
|
||||
}
|
||||
|
||||
delete struct {
|
||||
fil *Exp
|
||||
filNU bool
|
||||
cols map[string]struct{}
|
||||
block bool
|
||||
}
|
||||
}
|
||||
|
||||
@ -125,12 +131,3 @@ func mapToList(m map[string]string) []string {
|
||||
sort.Strings(list)
|
||||
return list
|
||||
}
|
||||
|
||||
var varRe = regexp.MustCompile(`\$([a-zA-Z0-9_]+)`)
|
||||
|
||||
func parsePresets(m map[string]string) map[string]string {
|
||||
for k, v := range m {
|
||||
m[k] = varRe.ReplaceAllString(v, `{{$1}}`)
|
||||
}
|
||||
return m
|
||||
}
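The parsePresets helper in this hunk rewrites $var references inside preset values into {{var}} template tags; the AddRole changes further down stop calling it and store the configured presets as-is. A small self-contained sketch of the same rewrite, with made-up preset names for illustration:

package main

import (
	"fmt"
	"regexp"
)

var presetVarRe = regexp.MustCompile(`\$([a-zA-Z0-9_]+)`)

// rewritePresets mirrors the parsePresets behaviour shown above: every $name
// reference in a preset value becomes a {{name}} template tag.
func rewritePresets(m map[string]string) map[string]string {
	for k, v := range m {
		m[k] = presetVarRe.ReplaceAllString(v, `{{$1}}`)
	}
	return m
}

func main() {
	presets := map[string]string{
		"user_id":    "$user_id", // hypothetical preset
		"created_at": "now",
	}
	fmt.Println(rewritePresets(presets))
	// map[created_at:now user_id:{{user_id}}]
}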
|
||||
|
@ -201,6 +201,7 @@ func (p *Parser) peek(types ...itemType) bool {
|
||||
// if p.items[n]._type == itemEOF {
|
||||
// return false
|
||||
// }
|
||||
|
||||
if n >= len(p.items) {
|
||||
return false
|
||||
}
|
||||
@ -299,7 +300,7 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
|
||||
return nil, fmt.Errorf("too many fields (max %d)", maxFields)
|
||||
}
|
||||
|
||||
if p.peek(itemObjClose) {
|
||||
if p.peek(itemEOF, itemObjClose) {
|
||||
p.ignore()
|
||||
st.Pop()
|
||||
|
||||
@ -385,7 +386,7 @@ func (p *Parser) parseOpParams(args []Arg) ([]Arg, error) {
|
||||
return nil, fmt.Errorf("too many args (max %d)", maxArgs)
|
||||
}
|
||||
|
||||
if p.peek(itemArgsClose) {
|
||||
if p.peek(itemEOF, itemArgsClose) {
|
||||
p.ignore()
|
||||
break
|
||||
}
|
||||
@ -403,7 +404,7 @@ func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
|
||||
return nil, fmt.Errorf("too many args (max %d)", maxArgs)
|
||||
}
|
||||
|
||||
if p.peek(itemArgsClose) {
|
||||
if p.peek(itemEOF, itemArgsClose) {
|
||||
p.ignore()
|
||||
break
|
||||
}
|
||||
@ -470,7 +471,7 @@ func (p *Parser) parseObj() (*Node, error) {
|
||||
parent.Reset()
|
||||
|
||||
for {
|
||||
if p.peek(itemObjClose) {
|
||||
if p.peek(itemEOF, itemObjClose) {
|
||||
p.ignore()
|
||||
break
|
||||
}
|
||||
@ -602,7 +603,7 @@ func (t parserType) String() string {
|
||||
// nodePool.Put(n)
|
||||
// freeList = append(freeList, Frees{n, loc})
|
||||
// } else {
|
||||
// fmt.Printf(">>>>(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
|
||||
// fmt.Printf("(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
|
||||
// }
|
||||
// }
|
||||
|
||||
|
@ -2,6 +2,7 @@ package qcode
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/chirino/graphql/schema"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@ -130,7 +131,7 @@ updateThread {
|
||||
}
|
||||
|
||||
var gql = []byte(`
|
||||
products(
|
||||
{products(
|
||||
# returns only 30 items
|
||||
limit: 30,
|
||||
|
||||
@ -148,7 +149,7 @@ var gql = []byte(`
|
||||
id
|
||||
name
|
||||
price
|
||||
}`)
|
||||
}}`)
|
||||
|
||||
func BenchmarkQCompile(b *testing.B) {
|
||||
qcompile, _ := NewCompiler(Config{})
|
||||
@ -181,3 +182,59 @@ func BenchmarkQCompileP(b *testing.B) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkParse(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for n := 0; n < b.N; n++ {
|
||||
_, err := Parse(gql)
|
||||
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseP(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err := Parse(gql)
|
||||
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSchemaParse(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for n := 0; n < b.N; n++ {
|
||||
doc := schema.QueryDocument{}
|
||||
err := doc.Parse(string(gql))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSchemaParseP(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
doc := schema.QueryDocument{}
|
||||
err := doc.Parse(string(gql))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -172,6 +172,8 @@ const (
|
||||
type Compiler struct {
|
||||
tr map[string]map[string]*trval
|
||||
bl map[string]struct{}
|
||||
|
||||
defBlock bool
|
||||
}
|
||||
|
||||
var expPool = sync.Pool{
|
||||
@ -179,7 +181,7 @@ var expPool = sync.Pool{
|
||||
}
|
||||
|
||||
func NewCompiler(c Config) (*Compiler, error) {
|
||||
co := &Compiler{}
|
||||
co := &Compiler{defBlock: c.DefaultBlock}
|
||||
co.tr = make(map[string]map[string]*trval)
|
||||
co.bl = make(map[string]struct{}, len(c.Blocklist))
|
||||
|
||||
@ -218,6 +220,7 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
|
||||
}
|
||||
trv.query.cols = listToMap(trc.Query.Columns)
|
||||
trv.query.disable.funcs = trc.Query.DisableFunctions
|
||||
trv.query.block = trc.Query.Block
|
||||
|
||||
// insert config
|
||||
trv.insert.fil, trv.insert.filNU, err = compileFilter(trc.Insert.Filters)
|
||||
@ -225,8 +228,9 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
|
||||
return err
|
||||
}
|
||||
trv.insert.cols = listToMap(trc.Insert.Columns)
|
||||
trv.insert.psmap = parsePresets(trc.Insert.Presets)
|
||||
trv.insert.psmap = trc.Insert.Presets
|
||||
trv.insert.pslist = mapToList(trv.insert.psmap)
|
||||
trv.insert.block = trc.Insert.Block
|
||||
|
||||
// update config
|
||||
trv.update.fil, trv.update.filNU, err = compileFilter(trc.Update.Filters)
|
||||
@ -234,8 +238,9 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
|
||||
return err
|
||||
}
|
||||
trv.update.cols = listToMap(trc.Update.Columns)
|
||||
trv.update.psmap = parsePresets(trc.Update.Presets)
|
||||
trv.update.psmap = trc.Update.Presets
|
||||
trv.update.pslist = mapToList(trv.update.psmap)
|
||||
trv.update.block = trc.Update.Block
|
||||
|
||||
// delete config
|
||||
trv.delete.fil, trv.delete.filNU, err = compileFilter(trc.Delete.Filters)
|
||||
@ -243,6 +248,7 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
|
||||
return err
|
||||
}
|
||||
trv.delete.cols = listToMap(trc.Delete.Columns)
|
||||
trv.delete.block = trc.Delete.Block
|
||||
|
||||
singular := flect.Singularize(table)
|
||||
plural := flect.Pluralize(table)
|
||||
@ -328,17 +334,67 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
|
||||
}
|
||||
|
||||
trv := com.getRole(role, field.Name)
|
||||
skipRender := false
|
||||
|
||||
if trv != nil {
|
||||
switch action {
|
||||
case QTQuery:
|
||||
if trv.query.block {
|
||||
skipRender = true
|
||||
}
|
||||
|
||||
case QTInsert:
|
||||
if trv.insert.block {
|
||||
return fmt.Errorf("%s, insert blocked: %s", role, field.Name)
|
||||
}
|
||||
|
||||
case QTUpdate:
|
||||
if trv.update.block {
|
||||
return fmt.Errorf("%s, update blocked: %s", role, field.Name)
|
||||
}
|
||||
|
||||
case QTDelete:
|
||||
if trv.delete.block {
|
||||
return fmt.Errorf("%s, delete blocked: %s", role, field.Name)
|
||||
}
|
||||
}
|
||||
|
||||
} else if role == "anon" {
|
||||
skipRender = com.defBlock
|
||||
}
|
||||
|
||||
selects = append(selects, Select{
|
||||
ID: id,
|
||||
ParentID: parentID,
|
||||
Name: field.Name,
|
||||
Children: make([]int32, 0, 5),
|
||||
Allowed: trv.allowedColumns(action),
|
||||
Functions: true,
|
||||
SkipRender: skipRender,
|
||||
})
|
||||
s := &selects[(len(selects) - 1)]
|
||||
|
||||
if len(field.Alias) != 0 {
|
||||
s.FieldName = field.Alias
|
||||
} else {
|
||||
s.FieldName = s.Name
|
||||
}
|
||||
|
||||
if s.ParentID == -1 {
|
||||
qc.Roots = append(qc.Roots, s.ID)
|
||||
} else {
|
||||
p := &selects[s.ParentID]
|
||||
p.Children = append(p.Children, s.ID)
|
||||
}
|
||||
|
||||
if skipRender {
|
||||
id++
|
||||
continue
|
||||
}
|
||||
|
||||
s.Children = make([]int32, 0, 5)
|
||||
s.Functions = true
|
||||
|
||||
if trv != nil {
|
||||
s.Allowed = trv.allowedColumns(action)
|
||||
|
||||
switch action {
|
||||
case QTQuery:
|
||||
s.Functions = !trv.query.disable.funcs
|
||||
@ -352,11 +408,6 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
|
||||
s.PresetMap = trv.update.psmap
|
||||
s.PresetList = trv.update.pslist
|
||||
}
|
||||
|
||||
if len(field.Alias) != 0 {
|
||||
s.FieldName = field.Alias
|
||||
} else {
|
||||
s.FieldName = s.Name
|
||||
}
|
||||
|
||||
err := com.compileArgs(qc, s, field.Args, role)
|
||||
@ -367,13 +418,6 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
|
||||
// Order is important AddFilters must come after compileArgs
|
||||
com.AddFilters(qc, s, role)
|
||||
|
||||
if s.ParentID == -1 {
|
||||
qc.Roots = append(qc.Roots, s.ID)
|
||||
} else {
|
||||
p := &selects[s.ParentID]
|
||||
p.Children = append(p.Children, s.ID)
|
||||
}
|
||||
|
||||
s.Cols = make([]Column, 0, len(field.Children))
|
||||
action = QTQuery
|
||||
|
||||
@ -413,14 +457,10 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
|
||||
|
||||
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
|
||||
var fil *Exp
|
||||
var nu bool
|
||||
var nu bool // need user_id (or not) in this filter
|
||||
|
||||
if trv, ok := com.tr[role][sel.Name]; ok {
|
||||
fil, nu = trv.filter(qc.Type)
|
||||
|
||||
} else if role == "anon" {
|
||||
// Tables not defined under the anon role will not be rendered
|
||||
sel.SkipRender = true
|
||||
}
|
||||
|
||||
if fil == nil {
|
||||
@ -811,14 +851,17 @@ func (com *Compiler) compileArgAfterBefore(sel *Select, arg *Arg, pt PagingType)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var zeroTrv = &trval{}
|
||||
// var zeroTrv = &trval{}
|
||||
|
||||
func (com *Compiler) getRole(role, field string) *trval {
|
||||
if trv, ok := com.tr[role][field]; ok {
|
||||
return trv
|
||||
} else {
|
||||
return zeroTrv
|
||||
}
|
||||
|
||||
return nil
|
||||
// } else {
|
||||
// return zeroTrv
|
||||
// }
|
||||
}
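getRole now returns nil instead of a zero-value trval, and compileQuery combines that with the new Config.DefaultBlock flag: a table with no role config is only skipped for the anon role, and only when DefaultBlock is set. A stripped-down sketch of that decision, using simplified stand-in types rather than the real qcode ones:

package main

import "fmt"

type roleConfig struct{ queryBlock bool }

type compiler struct {
	tr       map[string]map[string]*roleConfig // role -> table -> config
	defBlock bool                              // Config.DefaultBlock
}

// skipRender reflects the new logic: an explicit per-role block always wins,
// otherwise unconfigured tables are skipped for anon only when DefaultBlock is on.
func (c *compiler) skipRender(role, table string) bool {
	if rc, ok := c.tr[role][table]; ok {
		return rc.queryBlock
	}
	return role == "anon" && c.defBlock
}

func main() {
	c := &compiler{
		tr: map[string]map[string]*roleConfig{
			"user": {"products": {queryBlock: false}},
		},
		defBlock: true,
	}
	fmt.Println(c.skipRender("user", "products")) // false: configured and not blocked
	fmt.Println(c.skipRender("anon", "products")) // true: no anon config, DefaultBlock on
}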
|
||||
|
||||
func AddFilter(sel *Select, fil *Exp) {
|
||||
@ -945,6 +988,9 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
|
||||
ex.Op = OpDistinct
|
||||
ex.Val = node.Val
|
||||
default:
|
||||
if len(node.Children) == 0 {
|
||||
return nil, fmt.Errorf("[Where] invalid operation: %s", name)
|
||||
}
|
||||
pushChildren(st, node.exp, node)
|
||||
return nil, nil // skip node
|
||||
}
|
||||
@ -964,8 +1010,9 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
|
||||
case NodeVar:
|
||||
ex.Type = ValVar
|
||||
default:
|
||||
return nil, fmt.Errorf("[Where] valid values include string, int, float, boolean and list: %s", node.Type)
|
||||
return nil, fmt.Errorf("[Where] invalid values for: %s", name)
|
||||
}
|
||||
|
||||
setWhereColName(ex, node)
|
||||
}
|
||||
|
||||
@ -984,10 +1031,15 @@ func setListVal(ex *Exp, node *Node) {
|
||||
case NodeFloat:
|
||||
ex.ListType = ValFloat
|
||||
}
|
||||
} else {
|
||||
ex.Val = node.Val
|
||||
return
|
||||
}
|
||||
|
||||
for i := range node.Children {
|
||||
ex.ListVal = append(ex.ListVal, node.Children[i].Val)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func setWhereColName(ex *Exp, node *Node) {
|
||||
@ -1014,6 +1066,7 @@ func setWhereColName(ex *Exp, node *Node) {
|
||||
ex.Col = list[listlen-1]
|
||||
ex.NestedCols = list[:listlen]
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func setOrderByColName(ob *OrderBy, node *Node) {
|
||||
|
@ -1,12 +1,17 @@
|
||||
package qcode
|
||||
|
||||
func GetQType(gql string) QType {
|
||||
ic := false
|
||||
for i := range gql {
|
||||
b := gql[i]
|
||||
if b == '{' {
|
||||
switch {
|
||||
case b == '#':
|
||||
ic = true
|
||||
case b == '\n':
|
||||
ic = false
|
||||
case !ic && b == '{':
|
||||
return QTQuery
|
||||
}
|
||||
if al(b) {
|
||||
case !ic && al(b):
|
||||
switch b {
|
||||
case 'm', 'M':
|
||||
return QTMutation
|
||||
|
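The GetQType change above makes query-type detection comment-aware: a '#' starts a line comment, a newline ends it, and only characters outside comments decide the type. A self-contained sketch of the same idea; the -1 result is inferred from the new test file below, and the fallback-to-query branch is an assumption since the hunk is truncated:

package main

import "fmt"

type QType int

const (
	QTUnknown QType = iota - 1 // -1, inferred from the "failed query with comment" test case
	QTQuery
	QTMutation
)

func al(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}

// getQType sketches the new comment-aware detection: '#' starts a line
// comment, '\n' ends it, and only characters outside comments are inspected.
func getQType(gql string) QType {
	ic := false
	for i := range gql {
		b := gql[i]
		switch {
		case b == '#':
			ic = true
		case b == '\n':
			ic = false
		case !ic && b == '{':
			return QTQuery
		case !ic && al(b):
			switch b {
			case 'm', 'M':
				return QTMutation
			default: // assumed: any other leading word is treated as a query
				return QTQuery
			}
		}
	}
	return QTUnknown
}

func main() {
	fmt.Println(getQType(" mutation {"))              // 1 (QTMutation)
	fmt.Println(getQType("# query is good\n {"))      // 0 (QTQuery)
	fmt.Println(getQType("# query is good query {"))  // -1 (QTUnknown): the comment never ends
}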
50
core/internal/qcode/utils_test.go
Normal file
@ -0,0 +1,50 @@
|
||||
package qcode
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestGetQType(t *testing.T) {
|
||||
type args struct {
|
||||
gql string
|
||||
}
|
||||
type ts struct {
|
||||
name string
|
||||
args args
|
||||
want QType
|
||||
}
|
||||
tests := []ts{
|
||||
ts{
|
||||
name: "query",
|
||||
args: args{gql: " query {"},
|
||||
want: QTQuery,
|
||||
},
|
||||
ts{
|
||||
name: "mutation",
|
||||
args: args{gql: " mutation {"},
|
||||
want: QTMutation,
|
||||
},
|
||||
ts{
|
||||
name: "default query",
|
||||
args: args{gql: " {"},
|
||||
want: QTQuery,
|
||||
},
|
||||
ts{
|
||||
name: "default query with comment",
|
||||
args: args{gql: `# query is good
|
||||
{`},
|
||||
want: QTQuery,
|
||||
},
|
||||
ts{
|
||||
name: "failed query with comment",
|
||||
args: args{gql: `# query is good query {`},
|
||||
want: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := GetQType(tt.args.gql); got != tt.want {
|
||||
t.Errorf("GetQType() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
490
core/introspec.go
Normal file
@ -0,0 +1,490 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/chirino/graphql"
|
||||
"github.com/chirino/graphql/resolvers"
|
||||
"github.com/chirino/graphql/schema"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
)
|
||||
|
||||
var typeMap map[string]string = map[string]string{
|
||||
"smallint": "Int",
|
||||
"integer": "Int",
|
||||
"bigint": "Int",
|
||||
"smallserial": "Int",
|
||||
"serial": "Int",
|
||||
"bigserial": "Int",
|
||||
"decimal": "Float",
|
||||
"numeric": "Float",
|
||||
"real": "Float",
|
||||
"double precision": "Float",
|
||||
"money": "Float",
|
||||
"boolean": "Boolean",
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) initGraphQLEgine() error {
|
||||
engine := graphql.New()
|
||||
engineSchema := engine.Schema
|
||||
dbSchema := sg.schema
|
||||
|
||||
if err := engineSchema.Parse(`enum OrderDirection { asc desc }`); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gqltype := func(col psql.DBColumn) schema.Type {
|
||||
typeName := typeMap[strings.ToLower(col.Type)]
|
||||
if typeName == "" {
|
||||
typeName = "String"
|
||||
}
|
||||
var t schema.Type = &schema.TypeName{Name: typeName}
|
||||
if col.NotNull {
|
||||
t = &schema.NonNull{OfType: t}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
query := &schema.Object{
|
||||
Name: "Query",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
mutation := &schema.Object{
|
||||
Name: "Mutation",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
engineSchema.Types[query.Name] = query
|
||||
engineSchema.Types[mutation.Name] = mutation
|
||||
engineSchema.EntryPoints[schema.Query] = query
|
||||
engineSchema.EntryPoints[schema.Mutation] = mutation
|
||||
|
||||
//validGraphQLIdentifierRegex := regexp.MustCompile(`^[A-Za-z_][A-Za-z_0-9]*$`)
|
||||
|
||||
scalarExpressionTypesNeeded := map[string]bool{}
|
||||
tableNames := dbSchema.GetTableNames()
|
||||
funcs := dbSchema.GetFunctions()
|
||||
|
||||
for _, table := range tableNames {
|
||||
ti, err := dbSchema.GetTable(table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !ti.IsSingular {
|
||||
continue
|
||||
}
|
||||
|
||||
singularName := ti.Singular
|
||||
// if !validGraphQLIdentifierRegex.MatchString(singularName) {
|
||||
// return errors.New("table name is not a valid GraphQL identifier: " + singularName)
|
||||
// }
|
||||
pluralName := ti.Plural
|
||||
// if !validGraphQLIdentifierRegex.MatchString(pluralName) {
|
||||
// return errors.New("table name is not a valid GraphQL identifier: " + pluralName)
|
||||
// }
|
||||
|
||||
outputType := &schema.Object{
|
||||
Name: singularName + "Output",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
engineSchema.Types[outputType.Name] = outputType
|
||||
|
||||
inputType := &schema.InputObject{
|
||||
Name: singularName + "Input",
|
||||
Fields: schema.InputValueList{},
|
||||
}
|
||||
engineSchema.Types[inputType.Name] = inputType
|
||||
|
||||
orderByType := &schema.InputObject{
|
||||
Name: singularName + "OrderBy",
|
||||
Fields: schema.InputValueList{},
|
||||
}
|
||||
engineSchema.Types[orderByType.Name] = orderByType
|
||||
|
||||
expressionTypeName := singularName + "Expression"
|
||||
expressionType := &schema.InputObject{
|
||||
Name: expressionTypeName,
|
||||
Fields: schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Name: "and",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "or",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
},
|
||||
}
|
||||
engineSchema.Types[expressionType.Name] = expressionType
|
||||
|
||||
for _, col := range ti.Columns {
|
||||
colName := col.Name
|
||||
// if !validGraphQLIdentifierRegex.MatchString(colName) {
|
||||
// return errors.New("column name is not a valid GraphQL identifier: " + colName)
|
||||
// }
|
||||
|
||||
colType := gqltype(col)
|
||||
nullableColType := ""
|
||||
if x, ok := colType.(*schema.NonNull); ok {
|
||||
nullableColType = x.OfType.(*schema.TypeName).Name
|
||||
} else {
|
||||
nullableColType = colType.(*schema.TypeName).Name
|
||||
}
|
||||
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: colName,
|
||||
Type: colType,
|
||||
})
|
||||
|
||||
for _, f := range funcs {
|
||||
if col.Type != f.Params[0].Type {
|
||||
continue
|
||||
}
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: f.Name + "_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
}
|
||||
|
||||
// If it's a numeric type...
|
||||
if nullableColType == "Float" || nullableColType == "Int" {
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "avg_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "count_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "max_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "min_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_pop_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_samp_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "variance_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "var_pop_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "var_samp_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
}
|
||||
|
||||
inputType.Fields = append(inputType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: colType,
|
||||
})
|
||||
orderByType.Fields = append(orderByType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "OrderDirection"}},
|
||||
})
|
||||
|
||||
scalarExpressionTypesNeeded[nullableColType] = true
|
||||
|
||||
expressionType.Fields = append(expressionType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: nullableColType + "Expression"}},
|
||||
})
|
||||
}
|
||||
|
||||
outputTypeName := &schema.TypeName{Name: outputType.Name}
|
||||
inputTypeName := &schema.TypeName{Name: inputType.Name}
|
||||
pluralOutputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: outputType.Name}}}}
|
||||
pluralInputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: inputType.Name}}}}
|
||||
|
||||
args := schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: "To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."},
|
||||
Name: "order_by",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: orderByType.Name}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "where",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionType.Name}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "limit",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "offset",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "first",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "last",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "before",
|
||||
Type: &schema.TypeName{Name: "String"},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "after",
|
||||
Type: &schema.TypeName{Name: "String"},
|
||||
},
|
||||
}
|
||||
if ti.PrimaryCol != nil {
|
||||
t := gqltype(*ti.PrimaryCol)
|
||||
if _, ok := t.(*schema.NonNull); !ok {
|
||||
t = &schema.NonNull{OfType: t}
|
||||
}
|
||||
args = append(args, &schema.InputValue{
|
||||
Desc: schema.Description{Text: "Finds the record by the primary key"},
|
||||
Name: "id",
|
||||
Type: t,
|
||||
})
|
||||
}
|
||||
|
||||
if ti.TSVCol != nil {
|
||||
args = append(args, &schema.InputValue{
|
||||
Desc: schema.Description{Text: "Performs full text search using a TSV index"},
|
||||
Name: "search",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
})
|
||||
}
|
||||
|
||||
query.Fields = append(query.Fields, &schema.Field{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: singularName,
|
||||
Type: outputTypeName,
|
||||
Args: args,
|
||||
})
|
||||
query.Fields = append(query.Fields, &schema.Field{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: pluralName,
|
||||
Type: pluralOutputTypeName,
|
||||
Args: args,
|
||||
})
|
||||
|
||||
mutationArgs := append(args, schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "insert",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "update",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "upsert",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
}...)
|
||||
|
||||
mutation.Fields = append(mutation.Fields, &schema.Field{
|
||||
Name: singularName,
|
||||
Args: mutationArgs,
|
||||
Type: outputType,
|
||||
})
|
||||
mutation.Fields = append(mutation.Fields, &schema.Field{
|
||||
Name: pluralName,
|
||||
Args: append(mutationArgs, schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "inserts",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "updates",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "upserts",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
}...),
|
||||
Type: outputType,
|
||||
})
|
||||
}
|
||||
|
||||
for typeName := range scalarExpressionTypesNeeded {
|
||||
expressionType := &schema.InputObject{
|
||||
Name: typeName + "Expression",
|
||||
Fields: schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Name: "eq",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "neq",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "gt",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "greater_than",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lt",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lesser_than",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "gte",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "greater_or_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lte",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lesser_or_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "in",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nin",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_in",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
|
||||
&schema.InputValue{
|
||||
Name: "like",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nlike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_like",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "ilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_ilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "similar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nsimilar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_similar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key_any",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key_all",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "contains",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "contained_in",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "is_null",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Boolean"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
engineSchema.Types[expressionType.Name] = expressionType
|
||||
}
|
||||
|
||||
if err := engineSchema.ResolveTypes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
engine.Resolver = resolvers.Func(func(request *resolvers.ResolveRequest, next resolvers.Resolution) resolvers.Resolution {
|
||||
resolver := resolvers.MetadataResolver.Resolve(request, next)
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
resolver = resolvers.MethodResolver.Resolve(request, next) // needed by the MetadataResolver
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
sg.ge = engine
|
||||
return nil
|
||||
}
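introspec.go derives the introspection schema's field types from the database columns: typeMap maps Postgres type names to GraphQL scalars (falling back to String), and NOT NULL columns are wrapped in NonNull. A minimal sketch of just that mapping, decoupled from the chirino/graphql schema types; the column struct here is a simplified stand-in for psql.DBColumn:

package main

import (
	"fmt"
	"strings"
)

// column is a simplified stand-in for psql.DBColumn.
type column struct {
	Name    string
	Type    string
	NotNull bool
}

var pgToGQL = map[string]string{
	"smallint": "Int", "integer": "Int", "bigint": "Int",
	"smallserial": "Int", "serial": "Int", "bigserial": "Int",
	"decimal": "Float", "numeric": "Float", "real": "Float",
	"double precision": "Float", "money": "Float",
	"boolean": "Boolean",
}

// gqlType mirrors the gqltype closure above: unknown Postgres types fall back
// to String, and NOT NULL columns get a trailing "!" (GraphQL's NonNull).
func gqlType(col column) string {
	t := pgToGQL[strings.ToLower(col.Type)]
	if t == "" {
		t = "String"
	}
	if col.NotNull {
		t += "!"
	}
	return t
}

func main() {
	fmt.Println(gqlType(column{Name: "id", Type: "bigint", NotNull: true}))               // Int!
	fmt.Println(gqlType(column{Name: "price", Type: "numeric", NotNull: false}))          // Float
	fmt.Println(gqlType(column{Name: "email", Type: "character varying", NotNull: true})) // String!
}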
|
241
core/prepare.go
@ -2,192 +2,117 @@ package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash/maphash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/allow"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/valyala/fasttemplate"
|
||||
)
|
||||
|
||||
type preparedItem struct {
|
||||
type query struct {
|
||||
sync.Once
|
||||
sd *sql.Stmt
|
||||
args [][]byte
|
||||
ai allow.Item
|
||||
qt qcode.QType
|
||||
err error
|
||||
st stmt
|
||||
roleArg bool
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) initPrepared() error {
|
||||
ct := context.Background()
|
||||
func (sg *SuperGraph) prepare(q *query, role string) {
|
||||
var stmts []stmt
|
||||
var err error
|
||||
|
||||
qb := []byte(q.ai.Query)
|
||||
|
||||
switch q.qt {
|
||||
case qcode.QTQuery:
|
||||
if sg.abacEnabled {
|
||||
stmts, err = sg.buildMultiStmt(qb, q.ai.Vars)
|
||||
} else {
|
||||
stmts, err = sg.buildRoleStmt(qb, q.ai.Vars, role)
|
||||
}
|
||||
|
||||
case qcode.QTMutation:
|
||||
stmts, err = sg.buildRoleStmt(qb, q.ai.Vars, role)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
sg.log.Printf("WRN %s %s: %v", q.qt, q.ai.Name, err)
|
||||
}
|
||||
|
||||
q.st = stmts[0]
|
||||
q.roleArg = len(stmts) > 1
|
||||
|
||||
q.sd, err = sg.db.Prepare(q.st.sql)
|
||||
if err != nil {
|
||||
q.err = fmt.Errorf("prepare failed: %v: %s", err, q.st.sql)
|
||||
}
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) initPrepared() error {
|
||||
if sg.allowList.IsPersist() {
|
||||
return nil
|
||||
}
|
||||
sg.prepared = make(map[string]*preparedItem)
|
||||
|
||||
tx, err := sg.db.BeginTx(ct, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback() //nolint: errcheck
|
||||
|
||||
if err = sg.prepareRoleStmt(tx); err != nil {
|
||||
return fmt.Errorf("prepareRoleStmt: %w", err)
|
||||
if err := sg.prepareRoleStmt(); err != nil {
|
||||
return fmt.Errorf("role query: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
success := 0
|
||||
sg.queries = make(map[uint64]*query)
|
||||
|
||||
list, err := sg.allowList.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
h := maphash.Hash{}
|
||||
h.SetSeed(sg.hashSeed)
|
||||
|
||||
for _, v := range list {
|
||||
if len(v.Query) == 0 {
|
||||
continue
|
||||
}
|
||||
q := &query{ai: v, qt: qcode.GetQType(v.Query)}
|
||||
|
||||
err := sg.prepareStmt(v)
|
||||
if err == nil {
|
||||
success++
|
||||
continue
|
||||
}
|
||||
|
||||
// if len(v.Vars) == 0 {
|
||||
// logger.Warn().Err(err).Msg(v.Query)
|
||||
// } else {
|
||||
// logger.Warn().Err(err).Msgf("%s %s", v.Vars, v.Query)
|
||||
// }
|
||||
}
|
||||
|
||||
// logger.Info().
|
||||
// Msgf("Registered %d of %d queries from allow.list as prepared statements",
|
||||
// success, len(list))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) prepareStmt(item allow.Item) error {
|
||||
query := item.Query
|
||||
qb := []byte(query)
|
||||
vars := item.Vars
|
||||
|
||||
qt := qcode.GetQType(query)
|
||||
ct := context.Background()
|
||||
|
||||
tx, err := sg.db.BeginTx(ct, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback() //nolint: errcheck
|
||||
|
||||
switch qt {
|
||||
switch q.qt {
|
||||
case qcode.QTQuery:
|
||||
var stmts1 []stmt
|
||||
var err error
|
||||
|
||||
if sg.abacEnabled {
|
||||
stmts1, err = sg.buildMultiStmt(qb, vars)
|
||||
} else {
|
||||
stmts1, err = sg.buildRoleStmt(qb, vars, "user")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//logger.Debug().Msgf("Prepared statement 'query %s' (user)", item.Name)
|
||||
|
||||
err = sg.prepare(ct, tx, stmts1, stmtHash(item.Name, "user"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sg.queries[queryID(&h, v.Name, "user")] = q
|
||||
h.Reset()
|
||||
|
||||
if sg.anonExists {
|
||||
// logger.Debug().Msgf("Prepared statement 'query %s' (anon)", item.Name)
|
||||
|
||||
stmts2, err := sg.buildRoleStmt(qb, vars, "anon")
|
||||
if err == psql.ErrAllTablesSkipped {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = sg.prepare(ct, tx, stmts2, stmtHash(item.Name, "anon"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sg.queries[queryID(&h, v.Name, "anon")] = q
|
||||
h.Reset()
|
||||
}
|
||||
|
||||
case qcode.QTMutation:
|
||||
for _, role := range sg.conf.Roles {
|
||||
// logger.Debug().Msgf("Prepared statement 'mutation %s' (%s)", item.Name, role.Name)
|
||||
|
||||
stmts, err := sg.buildRoleStmt(qb, vars, role.Name)
|
||||
|
||||
if err != nil {
|
||||
// if len(item.Vars) == 0 {
|
||||
// logger.Warn().Err(err).Msg(item.Query)
|
||||
// } else {
|
||||
// logger.Warn().Err(err).Msgf("%s %s", item.Vars, item.Query)
|
||||
// }
|
||||
continue
|
||||
}
|
||||
|
||||
err = sg.prepare(ct, tx, stmts, stmtHash(item.Name, role.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
sg.queries[queryID(&h, v.Name, role.Name)] = q
|
||||
h.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) prepare(ct context.Context, tx *sql.Tx, st []stmt, key string) error {
|
||||
finalSQL, am := processTemplate(st[0].sql)
|
||||
|
||||
sd, err := tx.Prepare(finalSQL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sg.prepared[key] = &preparedItem{
|
||||
sd: sd,
|
||||
args: am,
|
||||
st: st[0],
|
||||
roleArg: len(st) > 1,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nolint: errcheck
|
||||
func (sg *SuperGraph) prepareRoleStmt(tx *sql.Tx) error {
|
||||
func (sg *SuperGraph) prepareRoleStmt() error {
|
||||
var err error
|
||||
|
||||
if !sg.abacEnabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
rq := strings.ReplaceAll(sg.conf.RolesQuery, "$user_id", "$1")
|
||||
w := &bytes.Buffer{}
|
||||
|
||||
io.WriteString(w, `SELECT (CASE WHEN EXISTS (`)
|
||||
io.WriteString(w, sg.conf.RolesQuery)
|
||||
io.WriteString(w, rq)
|
||||
io.WriteString(w, `) THEN `)
|
||||
|
||||
io.WriteString(w, `(SELECT (CASE`)
|
||||
@ -202,14 +127,12 @@ func (sg *SuperGraph) prepareRoleStmt(tx *sql.Tx) error {
|
||||
io.WriteString(w, `'`)
|
||||
}
|
||||
|
||||
io.WriteString(w, ` ELSE {{role}} END) FROM (`)
|
||||
io.WriteString(w, ` ELSE $2 END) FROM (`)
|
||||
io.WriteString(w, sg.conf.RolesQuery)
|
||||
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
|
||||
io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1; `)
|
||||
|
||||
roleSQL, _ := processTemplate(w.String())
|
||||
|
||||
sg.getRole, err = tx.Prepare(roleSQL)
|
||||
sg.getRole, err = sg.db.Prepare(w.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -217,47 +140,18 @@ func (sg *SuperGraph) prepareRoleStmt(tx *sql.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func processTemplate(tmpl string) (string, [][]byte) {
|
||||
st := struct {
|
||||
vmap map[string]int
|
||||
am [][]byte
|
||||
i int
|
||||
}{
|
||||
vmap: make(map[string]int),
|
||||
am: make([][]byte, 0, 5),
|
||||
i: 0,
|
||||
}
|
||||
|
||||
execFunc := func(w io.Writer, tag string) (int, error) {
|
||||
if n, ok := st.vmap[tag]; ok {
|
||||
return w.Write([]byte(fmt.Sprintf("$%d", n)))
|
||||
}
|
||||
st.am = append(st.am, []byte(tag))
|
||||
st.i++
|
||||
st.vmap[tag] = st.i
|
||||
return w.Write([]byte(fmt.Sprintf("$%d", st.i)))
|
||||
}
|
||||
|
||||
t1 := fasttemplate.New(tmpl, `'{{`, `}}'`)
|
||||
ts1 := t1.ExecuteFuncString(execFunc)
|
||||
|
||||
t2 := fasttemplate.New(ts1, `{{`, `}}`)
|
||||
ts2 := t2.ExecuteFuncString(execFunc)
|
||||
|
||||
return ts2, st.am
|
||||
}
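// Illustration (not in the original source): given the template
//
//	SELECT * FROM users WHERE id = '{{id}}' AND role = {{role}}
//
// processTemplate returns
//
//	SELECT * FROM users WHERE id = $1 AND role = $2
//
// together with args [][]byte{[]byte("id"), []byte("role")}. The first
// fasttemplate pass rewrites quoted '{{tag}}' placeholders, the second pass
// rewrites bare {{tag}} ones, and each distinct tag is numbered only once.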
|
||||
|
||||
func (sg *SuperGraph) initAllowList() error {
|
||||
var ac allow.Config
|
||||
var err error
|
||||
|
||||
if len(sg.conf.AllowListFile) == 0 {
|
||||
sg.conf.UseAllowList = false
|
||||
sg.log.Printf("WRN allow list disabled no file specified")
|
||||
if sg.conf.AllowListFile == "" {
|
||||
sg.conf.AllowListFile = "allow.list"
|
||||
}
|
||||
|
||||
if sg.conf.UseAllowList {
|
||||
ac = allow.Config{CreateIfNotExists: true, Persist: true}
|
||||
// When the list is not enabled it is still created and
// new queries are saved to it.
|
||||
if !sg.conf.UseAllowList {
|
||||
ac = allow.Config{CreateIfNotExists: true, Persist: true, Log: sg.log}
|
||||
}
|
||||
|
||||
sg.allowList, err = allow.New(sg.conf.AllowListFile, ac)
|
||||
@ -269,9 +163,8 @@ func (sg *SuperGraph) initAllowList() error {
|
||||
}
|
||||
|
||||
// nolint: errcheck
|
||||
func stmtHash(name string, role string) string {
|
||||
h := sha1.New()
|
||||
io.WriteString(h, strings.ToLower(name))
|
||||
io.WriteString(h, role)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
func queryID(h *maphash.Hash, name string, role string) uint64 {
|
||||
h.WriteString(name)
|
||||
h.WriteString(role)
|
||||
return h.Sum64()
|
||||
}
|
||||
|
382
core/remote.go
382
core/remote.go
@ -1,253 +1,249 @@
|
||||
package core
|
||||
|
||||
// import (
|
||||
// "bytes"
|
||||
// "errors"
|
||||
// "fmt"
|
||||
// "net/http"
|
||||
// "sync"
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
// "github.com/cespare/xxhash/v2"
|
||||
// "github.com/dosco/super-graph/jsn"
|
||||
// "github.com/dosco/super-graph/core/internal/qcode"
|
||||
// )
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
// func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
|
||||
// var err error
|
||||
func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
|
||||
var err error
|
||||
|
||||
// if len(data) == 0 || st.skipped == 0 {
|
||||
// return data, nil
|
||||
// }
|
||||
sel := st.qc.Selects
|
||||
h := xxhash.New()
|
||||
|
||||
// sel := st.qc.Selects
|
||||
// h := xxhash.New()
|
||||
// fetch the field name used within the db response json
|
||||
// that are used to mark insertion points and the mapping between
|
||||
// those field names and their select objects
|
||||
fids, sfmap := sg.parentFieldIds(h, sel, st.md.Skipped)
|
||||
|
||||
// // fetch the field name used within the db response json
|
||||
// // that are used to mark insertion points and the mapping between
|
||||
// // those field names and their select objects
|
||||
// fids, sfmap := parentFieldIds(h, sel, st.skipped)
|
||||
// fetch the field values of the marked insertion points
|
||||
// these values contain the id to be used with fetching remote data
|
||||
from := jsn.Get(data, fids)
|
||||
var to []jsn.Field
|
||||
|
||||
// // fetch the field values of the marked insertion points
|
||||
// // these values contain the id to be used with fetching remote data
|
||||
// from := jsn.Get(data, fids)
|
||||
// var to []jsn.Field
|
||||
switch {
|
||||
case len(from) == 1:
|
||||
to, err = sg.resolveRemote(hdr, h, from[0], sel, sfmap)
|
||||
|
||||
// switch {
|
||||
// case len(from) == 1:
|
||||
// to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
|
||||
case len(from) > 1:
|
||||
to, err = sg.resolveRemotes(hdr, h, from, sel, sfmap)
|
||||
|
||||
// case len(from) > 1:
|
||||
// to, err = resolveRemotes(hdr, h, from, sel, sfmap)
|
||||
default:
|
||||
return nil, errors.New("something went wrong, no remote ids found in db response")
|
||||
}
|
||||
|
||||
// default:
|
||||
// return nil, errors.New("something wrong no remote ids found in db response")
|
||||
// }
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
var ob bytes.Buffer
|
||||
|
||||
// var ob bytes.Buffer
|
||||
err = jsn.Replace(&ob, data, from, to)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// err = jsn.Replace(&ob, data, from, to)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
return ob.Bytes(), nil
|
||||
}
|
||||
|
||||
// return ob.Bytes(), nil
|
||||
// }
|
||||
func (sg *SuperGraph) resolveRemote(
|
||||
hdr http.Header,
|
||||
h *xxhash.Digest,
|
||||
field jsn.Field,
|
||||
sel []qcode.Select,
|
||||
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
|
||||
|
||||
// func resolveRemote(
|
||||
// hdr http.Header,
|
||||
// h *xxhash.Digest,
|
||||
// field jsn.Field,
|
||||
// sel []qcode.Select,
|
||||
// sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
|
||||
// replacement data for the marked insertion points
|
||||
// key and value will be replaced by what's below
|
||||
toA := [1]jsn.Field{}
|
||||
to := toA[:1]
|
||||
|
||||
// // replacement data for the marked insertion points
|
||||
// // key and value will be replaced by whats below
|
||||
// toA := [1]jsn.Field{}
|
||||
// to := toA[:1]
|
||||
// use the json key to find the related Select object
|
||||
k1 := xxhash.Sum64(field.Key)
|
||||
|
||||
// // use the json key to find the related Select object
|
||||
// k1 := xxhash.Sum64(field.Key)
|
||||
s, ok := sfmap[k1]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
p := sel[s.ParentID]
|
||||
|
||||
// s, ok := sfmap[k1]
|
||||
// if !ok {
|
||||
// return nil, nil
|
||||
// }
|
||||
// p := sel[s.ParentID]
|
||||
// then use the Table name in the Select and its parent
|
||||
// to find the resolver to use for this relationship
|
||||
k2 := mkkey(h, s.Name, p.Name)
|
||||
|
||||
// // then use the Table nme in the Select and it's parent
|
||||
// // to find the resolver to use for this relationship
|
||||
// k2 := mkkey(h, s.Name, p.Name)
|
||||
r, ok := sg.rmap[k2]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// r, ok := rmap[k2]
|
||||
// if !ok {
|
||||
// return nil, nil
|
||||
// }
|
||||
id := jsn.Value(field.Value)
|
||||
if len(id) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// id := jsn.Value(field.Value)
|
||||
// if len(id) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//st := time.Now()
|
||||
|
||||
// //st := time.Now()
|
||||
b, err := r.Fn(hdr, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// b, err := r.Fn(hdr, id)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
if len(r.Path) != 0 {
|
||||
b = jsn.Strip(b, r.Path)
|
||||
}
|
||||
|
||||
// if len(r.Path) != 0 {
|
||||
// b = jsn.Strip(b, r.Path)
|
||||
// }
|
||||
var ob bytes.Buffer
|
||||
|
||||
// var ob bytes.Buffer
|
||||
if len(s.Cols) != 0 {
|
||||
err = jsn.Filter(&ob, b, colsToList(s.Cols))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if len(s.Cols) != 0 {
|
||||
// err = jsn.Filter(&ob, b, colsToList(s.Cols))
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
} else {
|
||||
ob.WriteString("null")
|
||||
}
|
||||
|
||||
// } else {
|
||||
// ob.WriteString("null")
|
||||
// }
|
||||
to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
|
||||
return to, nil
|
||||
}
|
||||
|
||||
// to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
|
||||
// return to, nil
|
||||
// }
|
||||
func (sg *SuperGraph) resolveRemotes(
|
||||
hdr http.Header,
|
||||
h *xxhash.Digest,
|
||||
from []jsn.Field,
|
||||
sel []qcode.Select,
|
||||
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
|
||||
|
||||
// func resolveRemotes(
|
||||
// hdr http.Header,
|
||||
// h *xxhash.Digest,
|
||||
// from []jsn.Field,
|
||||
// sel []qcode.Select,
|
||||
// sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
|
||||
// replacement data for the marked insertion points
|
||||
// key and value will be replaced by what's below
|
||||
to := make([]jsn.Field, len(from))
|
||||
|
||||
// // replacement data for the marked insertion points
|
||||
// // key and value will be replaced by whats below
|
||||
// to := make([]jsn.Field, len(from))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(from))
|
||||
|
||||
// var wg sync.WaitGroup
|
||||
// wg.Add(len(from))
|
||||
var cerr error
|
||||
|
||||
// var cerr error
|
||||
for i, id := range from {
|
||||
|
||||
// for i, id := range from {
|
||||
// use the json key to find the related Select object
|
||||
k1 := xxhash.Sum64(id.Key)
|
||||
|
||||
// // use the json key to find the related Select object
|
||||
// k1 := xxhash.Sum64(id.Key)
|
||||
s, ok := sfmap[k1]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
p := sel[s.ParentID]
|
||||
|
||||
// s, ok := sfmap[k1]
|
||||
// if !ok {
|
||||
// return nil, nil
|
||||
// }
|
||||
// p := sel[s.ParentID]
|
||||
// then use the Table name in the Select and its parent
|
||||
// to find the resolver to use for this relationship
|
||||
k2 := mkkey(h, s.Name, p.Name)
|
||||
|
||||
// // then use the Table nme in the Select and it's parent
|
||||
// // to find the resolver to use for this relationship
|
||||
// k2 := mkkey(h, s.Name, p.Name)
|
||||
r, ok := sg.rmap[k2]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// r, ok := rmap[k2]
|
||||
// if !ok {
|
||||
// return nil, nil
|
||||
// }
|
||||
id := jsn.Value(id.Value)
|
||||
if len(id) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// id := jsn.Value(id.Value)
|
||||
// if len(id) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
go func(n int, id []byte, s *qcode.Select) {
|
||||
defer wg.Done()
|
||||
|
||||
// go func(n int, id []byte, s *qcode.Select) {
|
||||
// defer wg.Done()
|
||||
//st := time.Now()
|
||||
|
||||
// //st := time.Now()
|
||||
b, err := r.Fn(hdr, id)
|
||||
if err != nil {
|
||||
cerr = fmt.Errorf("%s: %s", s.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
// b, err := r.Fn(hdr, id)
|
||||
// if err != nil {
|
||||
// cerr = fmt.Errorf("%s: %s", s.Name, err)
|
||||
// return
|
||||
// }
|
||||
if len(r.Path) != 0 {
|
||||
b = jsn.Strip(b, r.Path)
|
||||
}
|
||||
|
||||
// if len(r.Path) != 0 {
|
||||
// b = jsn.Strip(b, r.Path)
|
||||
// }
|
||||
var ob bytes.Buffer
|
||||
|
||||
// var ob bytes.Buffer
|
||||
if len(s.Cols) != 0 {
|
||||
err = jsn.Filter(&ob, b, colsToList(s.Cols))
|
||||
if err != nil {
|
||||
cerr = fmt.Errorf("%s: %s", s.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
// if len(s.Cols) != 0 {
|
||||
// err = jsn.Filter(&ob, b, colsToList(s.Cols))
|
||||
// if err != nil {
|
||||
// cerr = fmt.Errorf("%s: %s", s.Name, err)
|
||||
// return
|
||||
// }
|
||||
} else {
|
||||
ob.WriteString("null")
|
||||
}
|
||||
|
||||
// } else {
|
||||
// ob.WriteString("null")
|
||||
// }
|
||||
to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
|
||||
}(i, id, s)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
|
||||
// }(i, id, s)
|
||||
// }
|
||||
// wg.Wait()
|
||||
return to, cerr
|
||||
}
|
||||
|
||||
// return to, cerr
|
||||
// }
|
||||
func (sg *SuperGraph) parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
|
||||
[][]byte,
|
||||
map[uint64]*qcode.Select) {
|
||||
|
||||
// func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
|
||||
// [][]byte,
|
||||
// map[uint64]*qcode.Select) {
|
||||
c := 0
|
||||
for i := range sel {
|
||||
s := &sel[i]
|
||||
if isSkipped(skipped, uint32(s.ID)) {
|
||||
c++
|
||||
}
|
||||
}
|
||||
|
||||
// c := 0
|
||||
// for i := range sel {
|
||||
// s := &sel[i]
|
||||
// if isSkipped(skipped, uint32(s.ID)) {
|
||||
// c++
|
||||
// }
|
||||
// }
|
||||
// list of keys (and their related values) to extract from
|
||||
// the db json response
|
||||
fm := make([][]byte, c)
|
||||
|
||||
// // list of keys (and it's related value) to extract from
|
||||
// // the db json response
|
||||
// fm := make([][]byte, c)
|
||||
// mapping between the above extracted key and a Select
|
||||
// object
|
||||
sm := make(map[uint64]*qcode.Select, c)
|
||||
n := 0
|
||||
|
||||
// // mapping between the above extracted key and a Select
|
||||
// // object
|
||||
// sm := make(map[uint64]*qcode.Select, c)
|
||||
// n := 0
|
||||
for i := range sel {
|
||||
s := &sel[i]
|
||||
|
||||
// for i := range sel {
|
||||
// s := &sel[i]
|
||||
if !isSkipped(skipped, uint32(s.ID)) {
|
||||
continue
|
||||
}
|
||||
|
||||
// if !isSkipped(skipped, uint32(s.ID)) {
|
||||
// continue
|
||||
// }
|
||||
p := sel[s.ParentID]
|
||||
k := mkkey(h, s.Name, p.Name)
|
||||
|
||||
// p := sel[s.ParentID]
|
||||
// k := mkkey(h, s.Name, p.Name)
|
||||
if r, ok := sg.rmap[k]; ok {
|
||||
fm[n] = r.IDField
|
||||
n++
|
||||
|
||||
// if r, ok := rmap[k]; ok {
|
||||
// fm[n] = r.IDField
|
||||
// n++
|
||||
k := xxhash.Sum64(r.IDField)
|
||||
sm[k] = s
|
||||
}
|
||||
}
|
||||
|
||||
// k := xxhash.Sum64(r.IDField)
|
||||
// sm[k] = s
|
||||
// }
|
||||
// }
|
||||
return fm, sm
|
||||
}
|
||||
|
||||
// return fm, sm
|
||||
// }
|
||||
func isSkipped(n uint32, pos uint32) bool {
|
||||
return ((n & (1 << pos)) != 0)
|
||||
}
|
||||
|
||||
// func isSkipped(n uint32, pos uint32) bool {
|
||||
// return ((n & (1 << pos)) != 0)
|
||||
// }
|
||||
func colsToList(cols []qcode.Column) []string {
|
||||
var f []string
|
||||
|
||||
// func colsToList(cols []qcode.Column) []string {
|
||||
// var f []string
|
||||
|
||||
// for i := range cols {
|
||||
// f = append(f, cols[i].Name)
|
||||
// }
|
||||
// return f
|
||||
// }
|
||||
for i := range cols {
|
||||
f = append(f, cols[i].Name)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
131
core/resolve.go
131
core/resolve.go
@ -6,90 +6,90 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
var (
|
||||
rmap map[uint64]*resolvFn
|
||||
)
|
||||
|
||||
type resolvFn struct {
|
||||
IDField []byte
|
||||
Path [][]byte
|
||||
Fn func(h http.Header, id []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// func initResolvers() {
|
||||
// var err error
|
||||
// rmap = make(map[uint64]*resolvFn)
|
||||
func (sg *SuperGraph) initResolvers() error {
|
||||
var err error
|
||||
sg.rmap = make(map[uint64]*resolvFn)
|
||||
|
||||
// for _, t := range conf.Tables {
|
||||
// err = initRemotes(t)
|
||||
// if err != nil {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
for _, t := range sg.conf.Tables {
|
||||
err = sg.initRemotes(t)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// if err != nil {
|
||||
// errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
|
||||
// }
|
||||
// }
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize resolvers: %v", err)
|
||||
}
|
||||
|
||||
// func initRemotes(t Table) error {
|
||||
// h := xxhash.New()
|
||||
return nil
|
||||
}
|
||||
|
||||
// for _, r := range t.Remotes {
|
||||
// // defines the table column to be used as an id in the
|
||||
// // remote request
|
||||
// idcol := r.ID
|
||||
func (sg *SuperGraph) initRemotes(t Table) error {
|
||||
h := xxhash.New()
|
||||
|
||||
// // if no table column specified in the config then
|
||||
// // use the primary key of the table as the id
|
||||
// if len(idcol) == 0 {
|
||||
// pcol, err := pcompile.IDColumn(t.Name)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// idcol = pcol.Key
|
||||
// }
|
||||
// idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
|
||||
for _, r := range t.Remotes {
|
||||
// defines the table column to be used as an id in the
|
||||
// remote request
|
||||
idcol := r.ID
|
||||
|
||||
// // register a relationship between the remote data
|
||||
// // and the database table
|
||||
// if no table column specified in the config then
|
||||
// use the primary key of the table as the id
|
||||
if len(idcol) == 0 {
|
||||
pcol, err := sg.pc.IDColumn(t.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idcol = pcol.Key
|
||||
}
|
||||
idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
|
||||
|
||||
// val := &psql.DBRel{Type: psql.RelRemote}
|
||||
// val.Left.Col = idcol
|
||||
// val.Right.Col = idk
|
||||
// register a relationship between the remote data
|
||||
// and the database table
|
||||
|
||||
// err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
val := &psql.DBRel{Type: psql.RelRemote}
|
||||
val.Left.Col = idcol
|
||||
val.Right.Col = idk
|
||||
|
||||
// // the function thats called to resolve this remote
|
||||
// // data request
|
||||
// fn := buildFn(r)
|
||||
err := sg.pc.AddRelationship(sanitize(r.Name), t.Name, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// path := [][]byte{}
|
||||
// for _, p := range strings.Split(r.Path, ".") {
|
||||
// path = append(path, []byte(p))
|
||||
// }
|
||||
// the function that's called to resolve this remote
|
||||
// data request
|
||||
fn := buildFn(r)
|
||||
|
||||
// rf := &resolvFn{
|
||||
// IDField: []byte(idk),
|
||||
// Path: path,
|
||||
// Fn: fn,
|
||||
// }
|
||||
path := [][]byte{}
|
||||
for _, p := range strings.Split(r.Path, ".") {
|
||||
path = append(path, []byte(p))
|
||||
}
|
||||
|
||||
// // index resolver obj by parent and child names
|
||||
// rmap[mkkey(h, r.Name, t.Name)] = rf
|
||||
rf := &resolvFn{
|
||||
IDField: []byte(idk),
|
||||
Path: path,
|
||||
Fn: fn,
|
||||
}
|
||||
|
||||
// // index resolver obj by IDField
|
||||
// rmap[xxhash.Sum64(rf.IDField)] = rf
|
||||
// }
|
||||
// index resolver obj by parent and child names
|
||||
sg.rmap[mkkey(h, r.Name, t.Name)] = rf
|
||||
|
||||
// return nil
|
||||
// }
|
||||
// index resolver obj by IDField
|
||||
sg.rmap[xxhash.Sum64(rf.IDField)] = rf
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
|
||||
reqURL := strings.Replace(r.URL, "$id", "%s", 1)
|
||||
@ -114,16 +114,13 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
|
||||
req.Header.Set(v, hdr.Get(v))
|
||||
}
|
||||
|
||||
// logger.Debug().Str("uri", uri).Msg("Remote Join")
|
||||
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
// errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to connect to '%s': %v", uri, err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if r.Debug {
|
||||
// if r.Debug {
|
||||
// reqDump, err := httputil.DumpRequestOut(req, true)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
@ -136,7 +133,7 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
|
||||
|
||||
// logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
|
||||
// reqDump, resDump)
|
||||
}
|
||||
// }
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return nil,
|
||||
|
15
core/utils.go
Normal file
15
core/utils.go
Normal file
@ -0,0 +1,15 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
// nolint: errcheck
|
||||
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
|
||||
h.WriteString(k1)
|
||||
h.WriteString(k2)
|
||||
v := h.Sum64()
|
||||
h.Reset()
|
||||
|
||||
return v
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
<template>
|
||||
<div class="shadow bg-white p-4 flex items-start" :class="className">
|
||||
<div class="shadow p-4 flex items-start" :class="className">
|
||||
<slot name="image"></slot>
|
||||
<div class="pl-4">
|
||||
<h2 class="p-0">
|
||||
|
@ -2,33 +2,33 @@
|
||||
<div>
|
||||
<main aria-labelledby="main-title" >
|
||||
<Navbar />
|
||||
<div style="height: 3.6rem"></div>
|
||||
|
||||
<div class="container mx-auto mt-24">
|
||||
<div class="container mx-auto pt-4">
|
||||
<div class="text-center">
|
||||
<div class="text-center text-4xl text-gray-800 leading-tight">
|
||||
<div class="text-center text-3xl md:text-4xl text-black leading-tight font-semibold">
|
||||
Fetch data without code
|
||||
</div>
|
||||
|
||||
<NavLink
|
||||
class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
|
||||
class="inline-block px-4 py-3 my-8 bg-blue-600 text-white font-bold rounded"
|
||||
:item="actionLink"
|
||||
/>
|
||||
|
||||
<a
|
||||
class="px-4 py-3 my-8 border-2 border-gray-500 text-gray-600 font-bold rounded"
|
||||
class="px-4 py-3 my-8 border-2 border-blue-600 text-blue-600 font-bold rounded"
|
||||
href="https://github.com/dosco/super-graph"
|
||||
target="_blank"
|
||||
>Github</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="container mx-auto mb-8">
|
||||
|
||||
<div class="container mx-auto mb-8 mt-0 md:mt-20 bg-green-100">
|
||||
<div class="flex flex-wrap">
|
||||
<div class="w-100 md:w-1/2 bg-indigo-300 text-indigo-800 text-lg p-4">
|
||||
<div class="text-center text-2xl font-bold pb-2">Before, struggle with SQL</div>
|
||||
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
|
||||
<div class="text-xl font-bold pb-4">Before, struggle with SQL</div>
|
||||
<pre>
|
||||
|
||||
type User struct {
|
||||
gorm.Model
|
||||
Profile Profile
|
||||
@ -50,8 +50,9 @@ db.Model(&user).
|
||||
and more ...
|
||||
</pre>
|
||||
</div>
|
||||
<div class="w-100 md:w-1/2 bg-green-300 text-black text-lg p-4">
|
||||
<div class="text-center text-2xl font-bold pb-2">With Super Graph, just ask.</div>
|
||||
|
||||
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
|
||||
<div class="text-xl font-bold pb-4">With Super Graph, just ask.</div>
|
||||
<pre>
|
||||
query {
|
||||
user(id: 5) {
|
||||
@ -59,26 +60,24 @@ query {
|
||||
first_name
|
||||
last_name
|
||||
picture_url
|
||||
}
|
||||
posts(first: 20, order_by: { score: desc }) {
|
||||
slug
|
||||
title
|
||||
created_at
|
||||
cached_votes_total
|
||||
vote(where: { user_id: { eq: $user_id } }) {
|
||||
id
|
||||
}
|
||||
votes_total
|
||||
votes { created_at }
|
||||
author { id name }
|
||||
tags { id name }
|
||||
}
|
||||
posts_cursor
|
||||
}
|
||||
}
|
||||
</pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<div class="mt-0 md:mt-20">
|
||||
<div
|
||||
class="flex flex-wrap mx-2 md:mx-20"
|
||||
v-if="data.features && data.features.length"
|
||||
@ -89,42 +88,29 @@ query {
|
||||
:key="index"
|
||||
>
|
||||
<div class="p-8">
|
||||
<h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
|
||||
<p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
|
||||
<h2 class="text-lg uppercase border-0">{{ feature.title }}</h2>
|
||||
<div class="text-xl text-gray-900 leading-snug">{{ feature.details }}</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="bg-gray-100 mt-10">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<div class="pt-0 md:pt-20">
|
||||
<div class="container mx-auto p-10">
|
||||
|
||||
<div class="pb-8 hidden md:flex justify-center">
|
||||
<div class="flex justify-center pb-20">
|
||||
<img src="arch-basic.svg">
|
||||
</div>
|
||||
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 text-center mb-4">
|
||||
What is Super Graph?
|
||||
</h1>
|
||||
<div class="text-2xl md:text-3xl">
|
||||
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making ineffiient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on you're apps core value.
|
||||
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making inefficient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on your app's core value.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="container mx-auto flex flex-wrap">
|
||||
<div class="md:w-1/2">
|
||||
<img src="/graphql.png">
|
||||
</div>
|
||||
|
||||
<div class="md:w-1/2">
|
||||
<img src="/json.png">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="mt-10 py-10 md:py-20">
|
||||
<div class="pt-20">
|
||||
<div class="container mx-auto px-10 md:px-0">
|
||||
<h1 class="uppercase font-semibold text-2xl text-blue-800 text-center">
|
||||
Try Super Graph
|
||||
@ -133,20 +119,18 @@ query {
|
||||
<h1 class="uppercase font-semibold text-lg text-gray-800">
|
||||
Deploy as a service using docker
|
||||
</h1>
|
||||
<div class="bg-gray-800 text-indigo-300 p-4 rounded">
|
||||
<div class="p-4 rounded bg-black text-white">
|
||||
<pre>$ git clone https://github.com/dosco/super-graph && cd super-graph && make install</pre>
|
||||
<pre>$ super-graph new blog; cd blog</pre>
|
||||
<pre>$ docker-compose run blog_api ./super-graph db:setup</pre>
|
||||
<pre>$ docker-compose up</pre>
|
||||
</div>
|
||||
|
||||
<div class="border-t mt-4 pb-4"></div>
|
||||
|
||||
<h1 class="uppercase font-semibold text-lg text-gray-800">
|
||||
Or use it with your own code
|
||||
</h1>
|
||||
<div class="text-md">
|
||||
<pre class="bg-gray-800 text-indigo-300 p-4 rounded">
|
||||
<pre class="p-4 rounded bg-black text-white">
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -161,17 +145,12 @@ import (
|
||||
func main() {
|
||||
db, err := sql.Open("pgx", "postgres://postgrs:@localhost:5432/example_db")
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
conf, err := config.NewConfig("./config")
|
||||
sg, err := core.NewSuperGraph(nil, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
}
|
||||
|
||||
sg, err = core.NewSuperGraph(conf, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
graphqlQuery := `
|
||||
@ -184,7 +163,7 @@ func main() {
|
||||
|
||||
res, err := sg.GraphQL(context.Background(), graphqlQuery, nil)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(res.Data))
|
||||
@ -194,7 +173,7 @@ func main() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-gray-100 mt-10">
|
||||
<div class="pt-0 md:pt-20">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
The story of {{ data.heroText }}
|
||||
@ -245,7 +224,7 @@ func main() {
|
||||
</div>
|
||||
-->
|
||||
|
||||
<div class="border-t py-10">
|
||||
<div class="pt-0 md:pt-20">
|
||||
<div class="block md:hidden w-100">
|
||||
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
|
||||
</iframe>
|
||||
@ -267,7 +246,101 @@ func main() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-gray-200 mt-10">
|
||||
<div class="container mx-auto pt-0 md:pt-20">
|
||||
<div class="flex flex-wrap bg-green-100">
|
||||
<div class="w-100 md:w-1/2 border border-green-500 text-gray-6 00 text-sm md:text-lg p-6">
|
||||
<div class="text-xl font-bold pb-4">No more joins joins, json, orms, just use GraphQL. Fetch all the data want in the structure you need.</div>
|
||||
<pre>
|
||||
query {
|
||||
thread {
|
||||
slug
|
||||
title
|
||||
published
|
||||
createdAt : created_at
|
||||
totalVotes : cached_votes_total
|
||||
totalPosts : cached_posts_total
|
||||
vote : thread_vote(where: { user_id: { eq: $user_id } }) {
|
||||
created_at
|
||||
}
|
||||
topics {
|
||||
slug
|
||||
name
|
||||
}
|
||||
author : me {
|
||||
slug
|
||||
}
|
||||
posts(first: 1, order_by: { score: desc }) {
|
||||
slug
|
||||
body
|
||||
published
|
||||
createdAt : created_at
|
||||
totalVotes : cached_votes_total
|
||||
totalComments : cached_comments_total
|
||||
vote {
|
||||
created_at
|
||||
}
|
||||
author : user {
|
||||
slug
|
||||
firstName : first_name
|
||||
lastName : last_name
|
||||
}
|
||||
}
|
||||
posts_cursor
|
||||
}
|
||||
}
|
||||
</pre>
|
||||
</div>
|
||||
|
||||
<div class="w-100 md:w-1/2 border border-l md:border-l-0 border-green-500 text-blue-900 text-sm md:text-lg p-6">
|
||||
<div class="text-xl font-bold pb-4">Instant results using a single highly optimized SQL. It's just that simple.</div>
|
||||
<pre>
|
||||
{
|
||||
"data": {
|
||||
"thread": {
|
||||
"slug": "eveniet-ex-24",
|
||||
"vote": null,
|
||||
"posts": [
|
||||
{
|
||||
"body": "Dolor laborum harum sed sit est ducimus temporibus velit non nobis repudiandae nobis suscipit commodi voluptatem debitis sed voluptas sequi officia.",
|
||||
"slug": "illum-in-voluptas-1418",
|
||||
"vote": null,
|
||||
"author": {
|
||||
"slug": "sigurd-kemmer",
|
||||
"lastName": "Effertz",
|
||||
"firstName": "Brandt"
|
||||
},
|
||||
"createdAt": "2020-04-07T04:22:42.115874+00:00",
|
||||
"published": true,
|
||||
"totalVotes": 0,
|
||||
"totalComments": 2
|
||||
}
|
||||
],
|
||||
"title": "In aut qui deleniti quia dolore quasi porro tenetur voluptatem ut adita alias fugit explicabo.",
|
||||
"author": null,
|
||||
"topics": [
|
||||
{
|
||||
"name": "CloudRun",
|
||||
"slug": "cloud-run"
|
||||
},
|
||||
{
|
||||
"name": "Postgres",
|
||||
"slug": "postgres"
|
||||
}
|
||||
],
|
||||
"createdAt": "2020-04-07T04:22:38.099482+00:00",
|
||||
"published": true,
|
||||
"totalPosts": 24,
|
||||
"totalVotes": 0,
|
||||
"posts_cursor": "mpeBl6L+QfJHc3cmLkLDj9pOdEZYTt5KQtLsazG3TLITB3hJhg=="
|
||||
}
|
||||
}
|
||||
}
|
||||
</pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="pt-0 md:pt-20">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
Build Secure Apps
|
||||
@ -292,8 +365,8 @@ func main() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<div class="pt-0 md:py -20">
|
||||
<div class="container mx-auto">
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
More Features
|
||||
</h1>
|
||||
|
@ -15,7 +15,7 @@ module.exports = {
|
||||
{ text: 'Internals', link: '/internals' },
|
||||
{ text: 'Github', link: 'https://github.com/dosco/super-graph' },
|
||||
{ text: 'Docker', link: 'https://hub.docker.com/r/dosco/super-graph/builds' },
|
||||
{ text: 'Join Chat', link: 'https://discord.gg/NKdXBc' },
|
||||
{ text: 'Join Chat', link: 'https://discord.com/invite/23Wh7c' },
|
||||
],
|
||||
serviceWorker: {
|
||||
updatePopup: true
|
||||
|
@ -1,6 +1,9 @@
|
||||
@tailwind base;
|
||||
|
||||
@css {
|
||||
body, .navbar, .navbar .links {
|
||||
@apply bg-white text-black border-0 !important;
|
||||
}
|
||||
h1 {
|
||||
@apply font-semibold text-3xl border-0 py-4
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ longTagline: Get an instant high performance GraphQL API for Postgres. No code n
|
||||
actionText: Get Started, Free, Open Source →
|
||||
actionLink: /guide
|
||||
|
||||
description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secured GraphQL API. It comes with tools to create a new app and manage it's database. You get it all, a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on you're apps core value.
|
||||
description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secured GraphQL API. It comes with tools to create a new app and manage its database. You get it all, a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.
|
||||
|
||||
features:
|
||||
- title: Simple
|
||||
|
@ -32,7 +32,7 @@ For this to work you have to ensure that the option `:domain => :all` is added t
|
||||
|
||||
### With an NGINX loadbalancer
|
||||
|
||||
If you're infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxyed to Super Graph. In the example NGINX config below all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [/microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)
|
||||
If your infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxied to Super Graph. In the example NGINX config below all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [/microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)
|
||||
|
||||
::: tip NGINX with sub-domain
|
||||
Yes, NGINX is very flexible and you can configure it to keep Super Graph on a subdomain instead of on the same top-level domain. I'm sure a little Googling will get you some great example configs for that.
|
||||
|
@ -347,12 +347,10 @@ beer_style
|
||||
beer_yeast
|
||||
|
||||
// Cars
|
||||
vehicle
|
||||
vehicle_type
|
||||
car
|
||||
car_type
|
||||
car_maker
|
||||
car_model
|
||||
fuel_type
|
||||
transmission_gear_type
|
||||
|
||||
// Text
|
||||
word
|
||||
@ -438,8 +436,8 @@ hipster_paragraph
|
||||
hipster_sentence
|
||||
|
||||
// File
|
||||
extension
|
||||
mine_type
|
||||
file_extension
|
||||
file_mine_type
|
||||
|
||||
// Numbers
|
||||
number
|
||||
@ -463,11 +461,18 @@ mac_address
|
||||
digit
|
||||
letter
|
||||
lexify
|
||||
rand_string
|
||||
shuffle_strings
|
||||
numerify
|
||||
```
|
||||
|
||||
Other utility functions
|
||||
|
||||
```
|
||||
shuffle_strings(string_array)
|
||||
make_slug(text)
|
||||
make_slug_lang(text, lang)
|
||||
```
|
||||
|
||||
### Migrations
|
||||
|
||||
Easy database migrations are one of the most important things when building products backed by a relational database. We make it super easy to manage and migrate your database.
|
||||
@ -725,6 +730,32 @@ query {
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Functions
|
||||
|
||||
Any function defined in the database, like the `add_five` function below that adds 5 to any number given to it, can be used within your query. The one limitation is that it should be a function that only accepts a single argument. The function is used within your GraphQL in a similar way to how aggregations are used above. See the example below.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
thread(id: 5) {
|
||||
id
|
||||
total_votes
|
||||
add_five_total_votes
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Postgres user-defined function `add_five`
|
||||
```
|
||||
CREATE OR REPLACE FUNCTION add_five(a integer) RETURNS integer AS $$
|
||||
BEGIN
|
||||
|
||||
RETURN a + 5;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
```
|
||||
|
||||
|
||||
In GraphQL, mutations are the operation type used when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete`. You can also do complex nested inserts and updates.
|
||||
|
||||
When using mutations the data must be passed as variables, since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments, and your variables are passed in as those arguments.
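For illustration, here is a minimal sketch in Go of an insert mutation driven by variables, adapted from the library example shown earlier on this site. The `product` table, its columns, the `pgx` driver import and the raw-JSON form of the variables are assumptions for the sketch, not requirements of your schema.

```go
package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib" // assumed driver import for the "pgx" driver name
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	// The mutation text stays fixed; the values travel separately as variables
	// so the compiled prepared statement can be reused on every call.
	mutation := `
	mutation {
		product(insert: $data) {
			id
			name
		}
	}`

	vars := json.RawMessage(`{"data": {"name": "Art of Computer Programming", "price": 30.5}}`)

	res, err := sg.GraphQL(context.Background(), mutation, vars)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
```

The same pattern applies to `update`, `upsert` and `delete`: only the contents of the variables change between calls, never the query text.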
|
||||
@ -1038,7 +1069,7 @@ mutation {
|
||||
|
||||
### Pagination
|
||||
|
||||
This is a must have feature of any API. When you want your users to go thought a list page by page or implement some fancy infinite scroll you're going to need pagination. There are two ways to paginate in Super Graph.
|
||||
This is a must have feature of any API. When you want your users to go through a list page by page or implement some fancy infinite scroll you're going to need pagination. There are two ways to paginate in Super Graph.
|
||||
|
||||
Limit-Offset
|
||||
This is simple enough but also inefficient when working with a large number of total items. Limit limits the number of items fetched, and offset is the point you want to fetch from. The below query will fetch 10 results at a time starting with the 100th item. You will have to keep updating the offset (110, 120, 130, etc.) to walk through the results, so make offset a variable.
|
||||
@ -1054,7 +1085,7 @@ query {
|
||||
```
|
||||
|
||||
#### Cursor
|
||||
This is a powerful and highly efficient way to paginate though a large number of results. Infact it does not matter how many total results there are this will always be lighting fast. You can use a cursor to walk forward of backward though the results. If you plan to implement infinite scroll this is the option you should choose.
|
||||
This is a powerful and highly efficient way to paginate a large number of results. In fact it does not matter how many total results there are, this will always be lightning fast. You can use a cursor to walk forward or backward through the results. If you plan to implement infinite scroll this is the option you should choose.
|
||||
|
||||
When going this route the results will contain a cursor value. This is an encrypted string that you don't have to worry about; just pass it back in to the next API call and you'll receive the next set of results. The cursor value is encrypted since its contents should only matter to Super Graph and not the client. Also, since the primary key is used for this feature, you might not want to leak its value to clients.
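As a rough sketch of what this loop can look like in Go (under the same assumptions as the earlier library example; the `after` argument name, the `products` table and the `pgx` driver import are illustrative and may differ in your setup):

```go
package main

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib" // assumed driver import for the "pgx" driver name
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	query := `
	query {
		products(first: 10, after: $cursor) {
			id
			name
		}
		products_cursor
	}`

	// Start with an empty cursor, then keep feeding back the products_cursor
	// value from each response to walk through the results.
	cursor := ""
	for page := 0; page < 3; page++ {
		vars, err := json.Marshal(map[string]string{"cursor": cursor})
		if err != nil {
			log.Fatal(err)
		}

		res, err := sg.GraphQL(context.Background(), query, json.RawMessage(vars))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(res.Data))

		var out struct {
			ProductsCursor string `json:"products_cursor"`
		}
		if err := json.Unmarshal(res.Data, &out); err != nil {
			log.Fatal(err)
		}
		cursor = out.ProductsCursor
	}
}
```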
|
||||
|
||||
@ -1704,7 +1735,7 @@ reload_on_config_change: true
|
||||
# seed_file: seed.js
|
||||
|
||||
# Path pointing to where the migrations can be found
|
||||
migrations_path: ./config/migrations
|
||||
migrations_path: ./migrations
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
@ -1790,12 +1821,31 @@ database:
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# Define additional variables here to be used with filters
|
||||
variables:
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 1m
|
||||
|
||||
# Set up an secure tls encrypted db connection
|
||||
enable_tls: false
|
||||
|
||||
# Required for tls. For example with Google Cloud SQL it's
|
||||
# <gcp-project-id>:<cloud-sql-instance>"
|
||||
# server_name: blah
|
||||
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# server_cert: ./server-ca.pem
|
||||
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# client_cert: ./client-cert.pem
|
||||
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# client_key: ./client-key.pem
|
||||
|
||||
# Define additional variables here to be used with filters
|
||||
variables:
|
||||
admin_account_id: "5"
|
||||
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
|
20
docs/website/.gitignore
vendored
Normal file
20
docs/website/.gitignore
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
# Dependencies
|
||||
/node_modules
|
||||
|
||||
# Production
|
||||
/build
|
||||
|
||||
# Generated files
|
||||
.docusaurus
|
||||
.cache-loader
|
||||
|
||||
# Misc
|
||||
.DS_Store
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
33
docs/website/README.md
Normal file
33
docs/website/README.md
Normal file
@ -0,0 +1,33 @@
|
||||
# Website
|
||||
|
||||
This website is built using [Docusaurus 2](https://v2.docusaurus.io/), a modern static website generator.
|
||||
|
||||
### Installation
|
||||
|
||||
```
|
||||
$ yarn
|
||||
```
|
||||
|
||||
### Local Development
|
||||
|
||||
```
|
||||
$ yarn start
|
||||
```
|
||||
|
||||
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
|
||||
|
||||
### Build
|
||||
|
||||
```
|
||||
$ yarn build
|
||||
```
|
||||
|
||||
This command generates static content into the `build` directory and can be served using any static content hosting service.
|
||||
|
||||
### Deployment
|
||||
|
||||
```
|
||||
$ GIT_USER=<Your GitHub username> USE_SSH=true yarn deploy
|
||||
```
|
||||
|
||||
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
|
207
docs/website/docs/advanced.md
Normal file
207
docs/website/docs/advanced.md
Normal file
@ -0,0 +1,207 @@
|
||||
---
|
||||
id: advanced
|
||||
title: Full Text Search & More
|
||||
sidebar_label: More Features
|
||||
---
|
||||
|
||||
## Database Relationships
|
||||
|
||||
In most cases you don't need this configuration; Super Graph will discover and learn the relationship graph within your database automatically. It does this using the `Foreign Key` relationships that you have defined in your database schema.
|
||||
|
||||
The below configs are only needed in special cases such as when you don't use foreign keys or when you want to create a relationship between two tables where a foreign key is not defined or cannot be defined.
|
||||
|
||||
For example, in the sample below a relationship is defined between the `tags` column on the `posts` table and the `slug` column on the `tags` table. This cannot be defined using foreign keys since the `tags` column is of array type `text[]`, and Postgres does not allow foreign keys on array columns.
|
||||
|
||||
```yaml
|
||||
tables:
|
||||
- name: posts
|
||||
columns:
|
||||
- name: tags
|
||||
related_to: tags.slug
|
||||
```
|
||||
|
||||
## Advanced Columns
|
||||
|
||||
The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a JSON column saves space and reduces complexity in your app. The only issue with these columns is really that your SQL queries can get harder to write and maintain.
|
||||
|
||||
Super Graph steps in here to help you by supporting these columns right out of the box. It allows you to work with these columns just like you would with tables. Joining data against or modifying array columns using the `connect` or `disconnect` keywords in mutations is fully supported. Another very useful feature is the ability to treat `json` or `binary json (jsonb)` columns as separate tables, even using them in nested queries joining against related tables. To replicate these features on your own would take a lot of complex SQL. Using Super Graph means you don't have to deal with any of this; it just works.
|
||||
|
||||
### Array Columns
|
||||
|
||||
Configure a relationship between an array column `tag_ids` which contains integer id's for tags and the column `id` in the table `tags`.
|
||||
|
||||
```yaml
|
||||
tables:
|
||||
- name: posts
|
||||
columns:
|
||||
- name: tag_ids
|
||||
related_to: tags.id
|
||||
```
|
||||
|
||||
```graphql
|
||||
query {
|
||||
posts {
|
||||
title
|
||||
tags {
|
||||
name
|
||||
image
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### JSON Column
|
||||
|
||||
Expose a JSON column called `tag_count` in the table `products` as a separate table. This JSON column contains a JSON array of objects, each with a tag id and a count of the number of times the tag was used. As a separate table you can nest it into your GraphQL query and treat it like a table, using any of the standard features like `order_by`, `limit`, `where` clauses, etc.
|
||||
|
||||
The configuration below tells Super Graph to create a synthetic table called `tag_count` using the column `tag_count` from the `products` table. And that this new table has two columns `tag_id` and `count` of the listed types and with the defined relationships.
|
||||
|
||||
```yaml
|
||||
tables:
|
||||
- name: tag_count
|
||||
table: products
|
||||
columns:
|
||||
- name: tag_id
|
||||
type: bigint
|
||||
related_to: tags.id
|
||||
- name: count
|
||||
type: int
|
||||
```
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products {
|
||||
name
|
||||
tag_counts {
|
||||
count
|
||||
tag {
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Remote Joins
|
||||
|
||||
It often happens that after fetching some data from the DB we need to call another API to fetch some more data, and have it all combined into a single JSON response. For example, along with a list of users you need their last 5 payments from Stripe. This requires you to query your DB for the users and Stripe for the payments. Super Graph handles all this for you; only the fields you requested from the Stripe API are returned.
|
||||
|
||||
:::info Is this fast?
|
||||
Super Graph is able to fetch remote data and merge it with the DB response in an efficient manner. Several optimizations such as parallel HTTP requests and a zero-allocation JSON merge algorithm make this very fast. All of this without you having to write a line of code.
|
||||
:::
|
||||
|
||||
For example, say you need to list the last 3 payments made by a user. You will first need to look up the user in the database and then call the Stripe API to fetch their last 3 payments. For this to work, the user table in your db must have a `customer_id` column that contains their Stripe customer ID.
|
||||
|
||||
Similarly you could also fetch the user's last tweet, lead info from Salesforce or whatever else you need. It's fine to mix several different `remote joins` into a single GraphQL query.
|
||||
|
||||
### Stripe Example
|
||||
|
||||
The configuration is self-explanatory. A `payments` field has been added under the `customers` table. This field is added to the `remotes` subsection that defines fields associated with `customers` that are remote and not real database columns.
|
||||
|
||||
The `id` parameter maps a column from the `customers` table to the `$id` variable. In this case it maps `$id` to the `customer_id` column.
|
||||
|
||||
```yaml
|
||||
tables:
|
||||
- name: customers
|
||||
remotes:
|
||||
- name: payments
|
||||
id: stripe_id
|
||||
url: http://rails_app:3000/stripe/$id
|
||||
path: data
|
||||
# debug: true
|
||||
# pass_headers:
|
||||
# - cookie
|
||||
# - host
|
||||
set_headers:
|
||||
- name: Authorization
|
||||
value: Bearer <stripe_api_key>
|
||||
```
|
||||
|
||||
#### How do I make use of this?
|
||||
|
||||
Just include `payments` like you would any other GraphQL selector under the `customers` selector. Super Graph will call the configured API for you and stitch (merge) the JSON the API sends back with the JSON generated from the database query. GraphQL features like aliases and fields all work.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
customers {
|
||||
id
|
||||
email
|
||||
payments {
|
||||
customer_id
|
||||
amount
|
||||
billing_details
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
And voilà, here is the result. You get all of this advanced and honestly complex querying capability without writing a single line of code.
|
||||
|
||||
```json
|
||||
"data": {
|
||||
"customers": [
|
||||
{
|
||||
"id": 1,
|
||||
"email": "linseymertz@reilly.co",
|
||||
"payments": [
|
||||
{
|
||||
"customer_id": "cus_YCj3ndB5Mz",
|
||||
"amount": 100,
|
||||
"billing_details": {
|
||||
"address": "1 Infinity Drive",
|
||||
"zipcode": "94024"
|
||||
}
|
||||
},
|
||||
...
|
||||
```
|
||||
|
||||
Even tracing data is available in the Super Graph web UI if tracing is enabled in the config. By default it is enabled in development. Additionally, you can set `debug: true` to enable HTTP request / response dumping to help with debugging.
|
||||
|
||||

|
||||
|
||||
## Full text search
|
||||
|
||||
Every app these days needs search. Often enough this means reaching for something heavy like Solr. While this will work, why add complexity to your infrastructure when Postgres has really great and fast full-text search built in? And since it's part of Postgres it's also available in Super Graph.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(
|
||||
# Search for all products that contain 'ale' or some version of it
|
||||
search: "ale"
|
||||
|
||||
# Return only matches where the price is less than 10
|
||||
where: { price: { lt: 10 } }
|
||||
|
||||
# Use the search_rank to order from the best match to the worst
|
||||
order_by: { search_rank: desc }
|
||||
) {
|
||||
id
|
||||
name
|
||||
search_rank
|
||||
search_headline_description
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This query will use the `tsvector` column in your database table to search for products that contain the query phrase or some version of it. Use the `search_rank` field to get the internal relevance ranking for the search results. To get the highlighted context within any of the table columns, use the `search_headline_` field prefix. For example `search_headline_name` will return the contents of the product's name column with the matching query marked with `<b></b>` HTML tags.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"products": [
|
||||
{
|
||||
"id": 11,
|
||||
"name": "Maharaj",
|
||||
"search_rank": 0.243171,
|
||||
"search_headline_description": "Blue Moon, Vegetable Beer, Willamette, 1007 - German <b>Ale</b>, 48 IBU, 7.9%, 11.8°Blg"
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"name": "Schneider Aventinus",
|
||||
"search_rank": 0.243171,
|
||||
"search_headline_description": "Dos Equis, Wood-aged Beer, Magnum, 1099 - Whitbread <b>Ale</b>, 15 IBU, 9.5%, 13.0°Blg"
|
||||
},
|
||||
...
|
||||
```
|
335
docs/website/docs/config.md
Normal file
335
docs/website/docs/config.md
Normal file
@ -0,0 +1,335 @@
|
||||
---
|
||||
id: config
|
||||
title: Configuration
|
||||
sidebar_label: Configuration
|
||||
---
|
||||
|
||||
Configuration files can be either YAML or JSON; their names are derived from the `GO_ENV` variable. For example, `GO_ENV=prod` will cause the `prod.yaml` config file to be used, while `GO_ENV=dev` will use `dev.yaml`. The path to look for the config files in can be specified using the `-path <folder>` command line argument.
|
||||
|
||||
We've tried to ensure that the config file is self-documenting and easy to work with.
|
||||
|
||||
```yaml
|
||||
# Inherit config from this other config file
|
||||
# so I only need to overwrite some values
|
||||
inherits: base
|
||||
|
||||
app_name: "Super Graph Development"
|
||||
host_port: 0.0.0.0:8080
|
||||
web_ui: true
|
||||
|
||||
# debug, error, warn, info
|
||||
log_level: "debug"
|
||||
|
||||
# enable or disable http compression (uses gzip)
|
||||
http_compress: true
|
||||
|
||||
# When production mode is 'true' only queries
|
||||
# from the allow list are permitted.
|
||||
# When it's 'false' all queries are saved to the
|
||||
# the allow list in ./config/allow.list
|
||||
production: false
|
||||
|
||||
# Throw a 401 on auth failure for queries that need auth
|
||||
auth_fail_block: false
|
||||
|
||||
# Latency tracing for database queries and remote joins
|
||||
# the resulting latency information is returned with the
|
||||
# response
|
||||
enable_tracing: true
|
||||
|
||||
# Watch the config folder and reload Super Graph
|
||||
# with the new configs when a change is detected
|
||||
reload_on_config_change: true
|
||||
|
||||
# File that points to the database seeding script
|
||||
# seed_file: seed.js
|
||||
|
||||
# Path pointing to where the migrations can be found
|
||||
migrations_path: ./migrations
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
# SG_DATABASE_PORT
|
||||
# SG_DATABASE_USER
|
||||
# SG_DATABASE_PASSWORD
|
||||
|
||||
# Auth related environment Variables
|
||||
# SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
|
||||
# SG_AUTH_RAILS_REDIS_URL
|
||||
# SG_AUTH_RAILS_REDIS_PASSWORD
|
||||
# SG_AUTH_JWT_PUBLIC_KEY_FILE
|
||||
|
||||
# inflections:
|
||||
# person: people
|
||||
# sheep: sheep
|
||||
|
||||
auth:
|
||||
# Can be 'rails' or 'jwt'
|
||||
type: rails
|
||||
cookie: _app_session
|
||||
|
||||
# Comment this out if you want to disable setting
|
||||
# the user_id via a header for testing.
|
||||
# Disable in production
|
||||
creds_in_header: true
|
||||
|
||||
rails:
|
||||
# Rails version this is used for reading the
|
||||
# various cookies formats.
|
||||
version: 5.2
|
||||
|
||||
# Found in 'Rails.application.config.secret_key_base'
|
||||
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
|
||||
|
||||
# Remote cookie store. (memcache or redis)
|
||||
# url: redis://redis:6379
|
||||
# password: ""
|
||||
# max_idle: 80
|
||||
# max_active: 12000
|
||||
# In most cases you don't need these
|
||||
# salt: "encrypted cookie"
|
||||
# sign_salt: "signed encrypted cookie"
|
||||
# auth_salt: "authenticated encrypted cookie"
|
||||
|
||||
# jwt:
|
||||
# provider: auth0
|
||||
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
|
||||
# public_key_file: /secrets/public_key.pem
|
||||
# public_key_type: ecdsa #rsa
|
||||
# header:
|
||||
# name: dnt
|
||||
# exists: true
|
||||
# value: localhost:8080
|
||||
|
||||
# You can add additional named auths to use with actions
|
||||
# In this example actions using this auth can only be
|
||||
# called from the Google Appengine Cron service that
|
||||
# sets a special header to all it's requests
|
||||
auths:
|
||||
- name: from_taskqueue
|
||||
type: header
|
||||
header:
|
||||
name: X-Appengine-Cron
|
||||
exists: true
|
||||
|
||||
database:
|
||||
type: postgres
|
||||
host: db
|
||||
port: 5432
|
||||
dbname: app_development
|
||||
user: postgres
|
||||
password: postgres
|
||||
|
||||
#schema: "public"
|
||||
#pool_size: 10
|
||||
#max_retries: 0
|
||||
#log_level: "debug"
|
||||
|
||||
# Set session variable "user.id" to the user id
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 1m
|
||||
|
||||
# Set up an secure tls encrypted db connection
|
||||
enable_tls: false
|
||||
|
||||
# Required for tls. For example with Google Cloud SQL it's
|
||||
# <gcp-project-id>:<cloud-sql-instance>"
|
||||
# server_name: blah
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# server_cert: ./server-ca.pem
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# client_cert: ./client-cert.pem
|
||||
# Required for tls. Can be a file path or the contents of the pem file
|
||||
# client_key: ./client-key.pem
|
||||
|
||||
# Define additional variables here to be used with filters
|
||||
variables:
|
||||
admin_account_id: "5"
|
||||
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
- password
|
||||
- encrypted
|
||||
- token
|
||||
|
||||
# Create custom actions with their own api endpoints
|
||||
# For example the below action will be available at /api/v1/actions/refresh_leaderboard_users
|
||||
# A request to this url will execute the configured SQL query
|
||||
# which in this case refreshes a materialized view in the database.
|
||||
# The auth_name is from one of the configured auths
|
||||
actions:
|
||||
- name: refresh_leaderboard_users
|
||||
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
|
||||
auth_name: from_taskqueue
|
||||
|
||||
tables:
|
||||
- name: customers
|
||||
remotes:
|
||||
- name: payments
|
||||
id: stripe_id
|
||||
url: http://rails_app:3000/stripe/$id
|
||||
path: data
|
||||
# debug: true
|
||||
pass_headers:
|
||||
- cookie
|
||||
set_headers:
|
||||
- name: Host
|
||||
value: 0.0.0.0
|
||||
# - name: Authorization
|
||||
# value: Bearer <stripe_api_key>
|
||||
|
||||
- # You can create new fields that have a
|
||||
# real db table backing them
|
||||
name: me
|
||||
table: users
|
||||
|
||||
roles_query: "SELECT * FROM users WHERE id = $user_id"
|
||||
|
||||
roles:
|
||||
- name: anon
|
||||
tables:
|
||||
- name: products
|
||||
limit: 10
|
||||
|
||||
query:
|
||||
columns: ["id", "name", "description"]
|
||||
aggregation: false
|
||||
|
||||
insert:
|
||||
allow: false
|
||||
|
||||
update:
|
||||
allow: false
|
||||
|
||||
delete:
|
||||
allow: false
|
||||
|
||||
- name: user
|
||||
tables:
|
||||
- name: users
|
||||
query:
|
||||
filters: ["{ id: { _eq: $user_id } }"]
|
||||
|
||||
- name: products
|
||||
query:
|
||||
limit: 50
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
columns: ["id", "name", "description"]
|
||||
disable_functions: false
|
||||
|
||||
insert:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
columns: ["id", "name", "description"]
|
||||
set:
|
||||
- created_at: "now"
|
||||
|
||||
update:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
set:
|
||||
- updated_at: "now"
|
||||
|
||||
delete:
|
||||
block: true
|
||||
|
||||
- name: admin
|
||||
match: id = 1000
|
||||
tables:
|
||||
- name: users
|
||||
filters: []
|
||||
```
|
||||
|
||||
If deploying into environments like Kubernetes it's useful to be able to configure things like secrets and hosts through environment variables, therefore we expose the environment variables listed below. This is especially useful for secrets since they are usually injected via a secrets management framework, e.g. Kubernetes Secrets.
|
||||
|
||||
Keep in mind any value can be overwritten using environment variables, for example `auth.jwt.public_key_type` converts to `SG_AUTH_JWT_PUBLIC_KEY_TYPE`. In short: prefix with `SG_`, upper-case everything and change all `.` to `_`.
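
To make the naming rule concrete, here's a minimal sketch of overriding a couple of config values before starting the service; the host value is hypothetical, only the variable names follow the conversion rule described above.

```bash
# database.host            -> SG_DATABASE_HOST
# database.port            -> SG_DATABASE_PORT
# auth.jwt.public_key_type -> SG_AUTH_JWT_PUBLIC_KEY_TYPE
export SG_DATABASE_HOST=db.internal.example.com   # hypothetical host
export SG_DATABASE_PORT=5432
export SG_AUTH_JWT_PUBLIC_KEY_TYPE=ecdsa

# then start Super Graph as usual with GO_ENV pointing at your config
```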
|
||||
|
||||
#### Postgres environment variables
|
||||
|
||||
```bash
|
||||
SG_DATABASE_HOST
|
||||
SG_DATABASE_PORT
|
||||
SG_DATABASE_USER
|
||||
SG_DATABASE_PASSWORD
|
||||
```
|
||||
|
||||
#### Auth environment variables
|
||||
|
||||
```bash
|
||||
SG_AUTH_RAILS_COOKIE_SECRET_KEY_BASE
|
||||
SG_AUTH_RAILS_REDIS_URL
|
||||
SG_AUTH_RAILS_REDIS_PASSWORD
|
||||
SG_AUTH_JWT_PUBLIC_KEY_FILE
|
||||
```
|
||||
|
||||
## YugabyteDB
|
||||
|
||||
Yugabyte is an open-source, geo-distributed, cloud-native relational DB that scales horizontally. Super Graph works with Yugabyte right out of the box. If you think your data needs will outgrow Postgres and you don't really want to deal with sharding then Yugabyte is the way to go. Just point Super Graph to your Yugabyte DB and everything will just work including running migrations, seeding, querying, mutations, etc.
|
||||
|
||||
To use Yugabyte in your local development flow just uncomment the following lines in the `docker-compose.yml` file that is part of your Super Graph app. Also remember to comment out the original Postgres `db` config.
|
||||
|
||||
```yaml
|
||||
# Postgres DB
|
||||
# db:
|
||||
# image: postgres:latest
|
||||
# ports:
|
||||
# - "5432:5432"
|
||||
|
||||
# Standard config to run a single node of Yugabyte
|
||||
yb-master:
|
||||
image: yugabytedb/yugabyte:latest
|
||||
container_name: yb-master-n1
|
||||
command: [ "/home/yugabyte/bin/yb-master",
|
||||
"--fs_data_dirs=/mnt/disk0,/mnt/disk1",
|
||||
"--master_addresses=yb-master-n1:7100",
|
||||
"--replication_factor=1",
|
||||
"--enable_ysql=true"]
|
||||
ports:
|
||||
- "7000:7000"
|
||||
environment:
|
||||
SERVICE_7000_NAME: yb-master
|
||||
|
||||
db:
|
||||
image: yugabytedb/yugabyte:latest
|
||||
container_name: yb-tserver-n1
|
||||
command: [ "/home/yugabyte/bin/yb-tserver",
|
||||
"--fs_data_dirs=/mnt/disk0,/mnt/disk1",
|
||||
"--start_pgsql_proxy",
|
||||
"--tserver_master_addrs=yb-master-n1:7100"]
|
||||
ports:
|
||||
- "9042:9042"
|
||||
- "6379:6379"
|
||||
- "5433:5433"
|
||||
- "9000:9000"
|
||||
environment:
|
||||
SERVICE_5433_NAME: ysql
|
||||
SERVICE_9042_NAME: ycql
|
||||
SERVICE_6379_NAME: yedis
|
||||
SERVICE_9000_NAME: yb-tserver
|
||||
depends_on:
|
||||
- yb-master
|
||||
|
||||
# Environment variables to point Super Graph to Yugabyte
|
||||
# This is required since it uses a different user and port number
|
||||
yourapp_api:
|
||||
image: dosco/super-graph:latest
|
||||
environment:
|
||||
GO_ENV: "development"
|
||||
# Uncomment below for Yugabyte DB
|
||||
SG_DATABASE_PORT: 5433
|
||||
SG_DATABASE_USER: yugabyte
|
||||
SG_DATABASE_PASSWORD: yugabyte
|
||||
volumes:
|
||||
- ./config:/config
|
||||
ports:
|
||||
- "8080:8080"
|
||||
depends_on:
|
||||
- db
|
||||
```
|
159
docs/website/docs/deploy.md
Normal file
@ -0,0 +1,159 @@
|
||||
---
|
||||
id: deploy
|
||||
title: How to deploy Super Graph
|
||||
sidebar_label: Deploy to Prod.
|
||||
---
|
||||
|
||||
Since you're reading this you're probably considering deploying Super Graph. You're in luck, it's really easy and there are several ways to choose from. Keep in mind Super Graph can be used as a pre-built Docker image or you can easily customize it and build your own Docker image.
|
||||
|
||||
:::info JWT tokens (Auth0, etc)
|
||||
When deploying on a subdomain and configuring this service to use JWT authentication, you will need the public key file or secret key. Ensure your web app passes the JWT token with every GraphQL request (a cookie is recommended). You also have to add the web domain to the `cors_allowed_origins` config option so CORS allows the browser to make cross-domain ajax requests.
|
||||
:::
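
A minimal sketch of the relevant config entry; the domain shown is a placeholder, only the `cors_allowed_origins` option name comes from the note above.

```yaml
# Allow the browser app's origin to call the GraphQL API cross-domain
cors_allowed_origins:
  - https://app.example.com
```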
|
||||
|
||||
## Google Cloud Run (Fully Managed)
|
||||
|
||||
Cloud Run is a fully managed compute platform for deploying and scaling containerized applications quickly and securely.
|
||||
Your Super Graph app comes with a `cloudbuild.yaml` file so it's really easy to use Google Cloud Build to build and deploy your Super Graph app to Google Cloud Run.
|
||||
|
||||
:::note
|
||||
Remember to give Cloud Build permission to deploy to Cloud Run first; this can be done in the Cloud Build settings screen. Also the service account you use with Cloud Run must have the IAM permissions to connect to Cloud SQL. https://cloud.google.com/sql/docs/postgres/connect-run
|
||||
:::
|
||||
|
||||
Use the command below to tell Cloud Build to build and deploy your app.
|
||||
|
||||
```bash
|
||||
gcloud builds submit --substitutions=SERVICE_ACCOUNT=admin@my-project.iam.gserviceaccount.com,REGION=us-central1 .
|
||||
```
|
||||
|
||||
:::info Secrets Management
|
||||
Your secrets, like the database password, should be managed by the Mozilla SOPS tool. This is a secrets management app that encrypts all your secrets and stores them in a file to be decrypted in production using a cloud KMS (Google Cloud KMS or Amazon KMS). Our cloud build file above expects the secrets file to be `config/prod.secrets.yml`. You can find more information on Mozilla SOPS on their site. https://github.com/mozilla/sops
|
||||
:::
|
||||
|
||||
## Build Docker Image Locally
|
||||
|
||||
If for whatever reason you decide to build your own Docker images then just use the command below.
|
||||
|
||||
```bash
|
||||
docker build -t your-api-service-name .
|
||||
```
|
||||
|
||||
## With a Rails app
|
||||
|
||||
Super Graph can read Rails session cookies, like those created by authentication gems (Devise or Warden). Based on how you've configured your Rails app the cookie can be signed, encrypted, both, include the user ID or just have the ID of the session. If you have chosen to use Redis or Memcache as your session store then Super Graph can read the session cookie and then look up the user in the session store. In short it works really well with almost all Rails apps.
|
||||
|
||||
For any of this to work Super Graph must be deployed in a way that makes the browser send the app's cookie to it along with the GraphQL query. That means Super Graph needs to be either on the same domain as your app or on a subdomain.
|
||||
|
||||
:::info I need an example
|
||||
Say your Rails app runs on `myrailsapp.com`, then Super Graph should be on the same domain or on a subdomain like `graphql.myrailsapp.com`. If you choose a subdomain then remember to read the [Deploy under a subdomain](#deploy-under-a-subdomain) section.
|
||||
:::
|
||||
|
||||
## Deploy under a subdomain
|
||||
|
||||
For this to work you have to ensure that the option `:domain => :all` is added to your Rails app's `Application.config.session_store` config, as in the sketch below; this will cause your Rails app to create session cookies that can be shared with sub-domains. More info here: [sharing-a-devise-user-session-across-subdomains-with-rails](http://excid3.com/blog/sharing-a-devise-user-session-across-subdomains-with-rails-3/)
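
A minimal sketch of the Rails-side change, assuming the default cookie store; the cookie name matches the `_app_session` value from the auth config above, adapt it to whatever session store your app actually uses.

```ruby
# config/initializers/session_store.rb (sketch)
Rails.application.config.session_store :cookie_store,
  key: "_app_session",
  domain: :all   # share the session cookie with sub-domains like graphql.myrailsapp.com
```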
|
||||
|
||||
## With NGINX
|
||||
|
||||
If your infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxied to Super Graph. In the example NGINX config below all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)
|
||||
|
||||
:::info NGINX with sub-domain
|
||||
Yes, NGINX is very flexible and you can configure it to keep Super Graph on a subdomain instead of on the same top level domain. I'm sure a little Googling will get you some great example configs for that.
|
||||
:::
|
||||
|
||||
```nginx
|
||||
# Configuration for the server
|
||||
server {
|
||||
|
||||
# Running port
|
||||
listen 80;
|
||||
|
||||
# Proxy the graphql api path to Super Graph
|
||||
location /api/v1/graphql {
|
||||
|
||||
proxy_pass http://super-graph-service:8080;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $server_name;
|
||||
|
||||
}
|
||||
|
||||
# Proxying all other paths to your Rails app
|
||||
location / {
|
||||
|
||||
proxy_pass http://your-rails-app:3000;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $server_name;
|
||||
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## On Kubernetes
|
||||
|
||||
If your Rails app runs on Kubernetes then ensure you have an ingress config deployed that points the path to the service that you have deployed Super Graph under.
|
||||
|
||||
### Ingress config
|
||||
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: simple-rails-app
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /
|
||||
spec:
|
||||
rules:
|
||||
- host: myrailsapp.com
|
||||
http:
|
||||
paths:
|
||||
- path: /api/v1/graphql
|
||||
backend:
|
||||
serviceName: graphql-service
|
||||
servicePort: 8080
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: rails-app
|
||||
servicePort: 3000
|
||||
```
|
||||
|
||||
### Service and deployment config
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: graphql-service
|
||||
labels:
|
||||
run: super-graph
|
||||
spec:
|
||||
ports:
|
||||
- port: 8080
|
||||
protocol: TCP
|
||||
selector:
|
||||
run: super-graph
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: super-graph
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
run: super-graph
|
||||
replicas: 2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
run: super-graph
|
||||
spec:
|
||||
containers:
|
||||
- name: super-graph
|
||||
image: docker.io/dosco/super-graph:latest
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
```
|
654
docs/website/docs/graphql.md
Normal file
@ -0,0 +1,654 @@
|
||||
---
|
||||
id: graphql
|
||||
title: How to GraphQL
|
||||
sidebar_label: GraphQL Syntax
|
||||
---
|
||||
|
||||
GraphQL (GQL) is a simple query syntax that's fast replacing REST APIs. GQL is great since it allows web developers to fetch the exact data that they need without depending on changes to backend code. Also if you squint hard enough it looks a little bit like JSON :smiley:
|
||||
|
||||
The below query will fetch a user's name, email and avatar image (renamed as picture). If you also need the user's `id` then just add it to the query.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
user {
|
||||
full_name
|
||||
email
|
||||
picture: avatar
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Multiple tables can also be fetched using a single GraphQL query. This is very fast since the entire query is converted into a single SQL query which the database can efficiently run.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
user {
|
||||
full_name
|
||||
email
|
||||
}
|
||||
products {
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Fetching data
|
||||
|
||||
To fetch a specific `product` by its ID you can use the `id` argument. The real id field name will be resolved automatically so this query will work even if your id column is named something like `product_id`.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(id: 3) {
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Postgres also supports full text search using a TSV index. Super Graph makes it easy to use this full text search capability using the `search` argument.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(search: "ale") {
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Sorting
|
||||
|
||||
To sort or order results just use the `order_by` argument. This can be combined with `where`, `search`, etc to build complex queries that fit your needs.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(order_by: { cached_votes_total: desc }) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
Super Graph supports complex queries where you can add filters, ordering, offsets and limits to the query. For example the below query will list all products where the price is greater than 10 and the id is not 5.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(where: { and: { price: { gt: 10 }, not: { id: { eq: 5 } } } }) {
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Nested where clause targeting related tables
|
||||
|
||||
Sometimes you need to query a table based on a condition that applies to a related table. For example say you need to list all users who belong to an account. The query below will fetch the id and email of all users who belong to the account with id 3.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
users(where: {
|
||||
accounts: { id: { eq: 3 } }
|
||||
}) {
|
||||
id
|
||||
email
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Logical Operators
|
||||
|
||||
| Name | Example                                                   | Explained                       |
| ---- | --------------------------------------------------------- | ------------------------------- |
| and  | price: { and: { gt: 10.5, lt: 20 } }                       | price > 10.5 AND price < 20     |
| or   | or: { price: { greater_than: 20 }, quantity: { gt: 0 } }   | price > 20 OR quantity > 0      |
| not  | not: { or: { quantity: { eq: 0 }, price: { eq: 0 } } }     | NOT (quantity = 0 OR price = 0) |
|
||||
|
||||
#### Other conditions
|
||||
|
||||
| Name                   | Example                                    | Explained                                                                                                 |
| ---------------------- | ------------------------------------------ | --------------------------------------------------------------------------------------------------------- |
| eq, equals             | id: { eq: 100 }                            | id = 100                                                                                                  |
| neq, not_equals        | id: { not_equals: 100 }                    | id != 100                                                                                                 |
| gt, greater_than       | id: { gt: 100 }                            | id > 100                                                                                                  |
| lt, lesser_than        | id: { lt: 100 }                            | id < 100                                                                                                  |
| gte, greater_or_equals | id: { gte: 100 }                           | id >= 100                                                                                                 |
| lte, lesser_or_equals  | id: { lesser_or_equals: 100 }              | id <= 100                                                                                                 |
| in                     | status: { in: [ "A", "B", "C" ] }          | status IN ('A', 'B', 'C')                                                                                 |
| nin, not_in            | status: { not_in: [ "A", "B", "C" ] }      | status NOT IN ('A', 'B', 'C')                                                                             |
| like                   | name: { like: "phil%" }                    | Names starting with 'phil'                                                                                |
| nlike, not_like        | name: { nlike: "v%m" }                     | Not names starting with 'v' and ending with 'm'                                                           |
| ilike                  | name: { ilike: "%wOn" }                    | Names ending with 'won' case-insensitive                                                                  |
| nilike, not_ilike      | name: { nilike: "%wOn" }                   | Not names ending with 'won' case-insensitive                                                              |
| similar                | name: { similar: "%(b\|d)%" }              | [Similar Docs](https://www.postgresql.org/docs/9/functions-matching.html#FUNCTIONS-SIMILARTO-REGEXP)      |
| nsimilar, not_similar  | name: { nsimilar: "%(b\|d)%" }             | [Not Similar Docs](https://www.postgresql.org/docs/9/functions-matching.html#FUNCTIONS-SIMILARTO-REGEXP)  |
| has_key                | column: { has_key: 'b' }                   | Does JSON column contain this key                                                                         |
| has_key_any            | column: { has_key_any: [ a, b ] }          | Does JSON column contain any of these keys                                                                |
| has_key_all            | column: { has_key_all: [ a, b ] }          | Does JSON column contain all of these keys                                                                |
| contains               | column: { contains: [1, 2, 4] }            | Does this array/json column contain this value                                                            |
| contained_in           | column: { contained_in: "{'a':1, 'b':2}" } | Is this array/json column contained within this value                                                     |
| is_null                | column: { is_null: true }                  | Is column value null or not                                                                               |
|
||||
|
||||
### Aggregations
|
||||
|
||||
You will often find the need to fetch aggregated values from the database such as `count`, `max`, `min`, etc. This is simple to do with GraphQL, just prefix the aggregation name to the field name that you want to aggregate, like `count_id`. The below query will group products by name and find the minimum price for each group. Notice the `min_price` field: we're adding `min_` to price.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products {
|
||||
name
|
||||
min_price
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
| Name | Explained |
|
||||
| ----------- | ---------------------------------------------------------------------- |
|
||||
| avg | Average value |
|
||||
| count | Count the values |
|
||||
| max | Maximum value |
|
||||
| min | Minimum value |
|
||||
| stddev | [Standard Deviation](https://en.wikipedia.org/wiki/Standard_deviation) |
|
||||
| stddev_pop | Population Standard Deviation |
|
||||
| stddev_samp | Sample Standard Deviation |
|
||||
| variance | [Variance](https://en.wikipedia.org/wiki/Variance) |
|
||||
| var_pop     | Population Variance                                                     |
| var_samp    | Sample Variance                                                         |
|
||||
|
||||
All kinds of queries are possible with GraphQL. Below is an example that uses a lot of the features available. Comments `# hello` are also valid within queries.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(
|
||||
# returns only 30 items
|
||||
limit: 30
|
||||
|
||||
# starts from item 10, commented out for now
|
||||
# offset: 10,
|
||||
|
||||
# orders the response items by highest price
|
||||
order_by: { price: desc }
|
||||
|
||||
# no duplicate prices returned
|
||||
distinct: [price]
|
||||
|
||||
# only items with an id >= 20 and < 28 are returned
|
||||
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
|
||||
) {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Functions
|
||||
|
||||
Any function defined in the database, like the below `add_five` that adds 5 to any number given to it, can be used
within your query. The one limitation is that it must be a function that accepts only a single argument. The function is used within your GraphQL query in a similar way to how aggregations are used above. Example below.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
thread(id: 5) {
|
||||
id
|
||||
total_votes
|
||||
add_five_total_votes
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Postgres user-defined function `add_five`
|
||||
|
||||
```sql
|
||||
CREATE OR REPLACE FUNCTION add_five(a integer) RETURNS integer AS $$
|
||||
BEGIN
|
||||
|
||||
RETURN a + 5;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
```
|
||||
|
||||
In GraphQL, mutations are the operation type used when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete` mutations. You can also do complex nested inserts and updates.
|
||||
|
||||
When using mutations the data must be passed as variables since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments, and your variables are passed in as those arguments.
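
For illustration only, here is what a hand-written Postgres prepared statement looks like; Super Graph generates and registers its own statements automatically, this sketch just shows the arguments-as-variables idea using the products table from the examples.

```sql
-- Hypothetical hand-written statement, not the SQL Super Graph actually generates
PREPARE insert_product (text, text, numeric) AS
  INSERT INTO products (name, description, price) VALUES ($1, $2, $3);

-- The variable values are supplied at execution time
EXECUTE insert_product('Art of Computer Programming', 'A comprehensive monograph', 30.5);
```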
|
||||
|
||||
### Insert
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk insert
|
||||
|
||||
```json
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
},
|
||||
{
|
||||
"name": "Compilers: Principles, Techniques, and Tools",
|
||||
"description": "Known to professors, students, and developers worldwide as the 'Dragon Book' is available in a new edition",
|
||||
"price": 93.74
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Update
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"price": 200.0
|
||||
},
|
||||
"product_id": 5
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(update: $data, id: $product_id) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk update
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"price": 500.0
|
||||
},
|
||||
"gt_product_id": 450.0,
|
||||
"lt_product_id:": 550.0
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(
|
||||
update: $data
|
||||
where: { price: { gt: $gt_product_id, lt: $lt_product_id } }
|
||||
) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Delete
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"price": 500.0
|
||||
},
|
||||
"product_id": 5
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(delete: true, id: $product_id) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk delete
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"price": 500.0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(delete: true, where: { price: { eq: 500.0 } }) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Upsert
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"id": 5,
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(upsert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Bulk upsert
|
||||
|
||||
```json
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"id": 5,
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"name": "Compilers: Principles, Techniques, and Tools",
|
||||
"description": "Known to professors, students, and developers worldwide as the 'Dragon Book' is available in a new edition",
|
||||
"price": 93.74
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(upsert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example you might need to create a product and assign it to a user, or create a user and their products at the same time. You just have to use simple JSON to define your mutation and Super Graph takes care of the rest.
|
||||
|
||||
### Nested Insert
|
||||
|
||||
Create a product item first and then assign it to a user
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": { "id": 5 }
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Or the reverse: create the user first and then their product
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Nested Update
|
||||
|
||||
Update a product item first and then assign it to a user
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"connect": { "id": 5 }
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(update: $data, id: 5) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Or the reverse: update a user first and then their product
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"email": "newemail@me.com",
|
||||
"full_name": "The Dude",
|
||||
"product": {
|
||||
"name": "Banana",
|
||||
"price": 1.25
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
user(update: $data, id: 1) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Pagination
|
||||
|
||||
This is a must have feature of any API. When you want your users to go through a list page by page or implement some fancy infinite scroll you're going to need pagination. There are two ways to paginate in Super Graph.
|
||||
|
||||
#### Limit-Offset

This is simple enough but also inefficient when working with a large number of total items. Limit limits the number of items fetched and offset is the point you want to fetch from. The below query will fetch 10 results at a time starting with the 100th item. You will have to keep updating the offset (110, 120, 130, etc.) to walk through the results, so make offset a variable.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(limit: 10, offset: 100) {
|
||||
id
|
||||
slug
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Cursor
|
||||
|
||||
This is a powerful and highly efficient way to paginate a large number of results. In fact it does not matter how many total results there are, this will always be lightning fast. You can use a cursor to walk forward or backward through the results. If you plan to implement infinite scroll this is the option you should choose.
|
||||
|
||||
When going this route the results will contain a cursor value; this is an encrypted string that you don't have to worry about, just pass it back in the next API call and you'll receive the next set of results. The cursor value is encrypted since its contents should only matter to Super Graph and not the client. Also since the primary key is used for this feature it's possible you might not want to leak its value to clients.
|
||||
|
||||
You will need to set this config value to ensure the encrypted cursor data is secure. If not set, a random value is used which will change with each deployment, breaking older cursor values that clients might be using, so it's best to set it.
|
||||
|
||||
```yaml
|
||||
# Secret key for general encryption operations like
|
||||
# encrypting the cursor data
|
||||
secret_key: supercalifajalistics
|
||||
```
|
||||
|
||||
Paginating forward through your results
|
||||
|
||||
```json
|
||||
{
|
||||
"variables": {
|
||||
"cursor": "MJoTLbQF4l0GuoDsYmCrpjPeaaIlNpfm4uFU4PQ="
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(first: 10, after: $cursor) {
|
||||
slug
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Paginating backward through your results
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(last: 10, before: $cursor) {
|
||||
slug
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
"data": {
|
||||
"products": [
|
||||
{
|
||||
"slug": "eius-nulla-et-8",
|
||||
"name" "Pale Ale"
|
||||
},
|
||||
{
|
||||
"slug": "sapiente-ut-alias-12",
|
||||
"name" "Brown Ale"
|
||||
}
|
||||
...
|
||||
],
|
||||
"products_cursor": "dJwHassm5+d82rGydH2xQnwNxJ1dcj4/cxkh5Cer"
|
||||
}
|
||||
```
|
||||
|
||||
Nested tables can also have cursors. Requesting multiple cursors is supported in a single request, but when paginating using a cursor only one table is currently supported. To explain this better: you can only use a `before` or `after` argument with a cursor value to paginate a single table in a query.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(last: 10) {
|
||||
slug
|
||||
name
|
||||
customers(last: 5) {
|
||||
email
|
||||
full_name
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Multiple order-by arguments are supported. Super Graph is smart enough to allow cursor-based pagination even when you also need a complex sort order like below.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(
|
||||
last: 10
|
||||
before: $cursor
|
||||
order_by: [ price: desc, total_customers: asc ]) {
|
||||
slug
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Using Variables
|
||||
|
||||
Variables (`$product_id`) and their values (`"product_id": 5`) can be passed alongside the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The built-in web UI also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner and better client side code.
|
||||
|
||||
```javascript
|
||||
// Define the request object keeping the query and the variables separate
|
||||
var req = {
|
||||
query: "{ product(id: $product_id) { name } }",
|
||||
variables: { product_id: 5 },
|
||||
};
|
||||
|
||||
// Use the fetch api to make the query
|
||||
fetch("http://localhost:8080/api/v1/graphql", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify(req),
|
||||
})
|
||||
.then((res) => res.json())
|
||||
.then((res) => console.log(res.data));
|
||||
```
|
190
docs/website/docs/home.md
Normal file
@ -0,0 +1,190 @@
|
||||
---
|
||||
id: home
|
||||
title: Super Graph
|
||||
hide_title: true
|
||||
sidebar_label: Home
|
||||
---
|
||||
|
||||
import useBaseUrl from '@docusaurus/useBaseUrl'; // Add to the top of the file below the front matter.
|
||||
|
||||
<div class="hero shadow--lw margin-bottom--lg">
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div class="col col--2">
|
||||
<img
|
||||
class="avatar__photo avatar__photo--xl"
|
||||
alt="Super Graph Logo"
|
||||
src={useBaseUrl('img/super-graph-logo.svg')}
|
||||
height="70"
|
||||
/>
|
||||
</div>
|
||||
<div class="col col--10"><h1 class="hero__title">Super Graph</h1></div>
|
||||
</div>
|
||||
<p class="hero__subtitle">Fetch data without code!</p>
|
||||
<div class="margin-bottom--lg">
|
||||
<a class="button button--secondary button--outline button--lg" href="start">
|
||||
Skip Intro
|
||||
</a>
|
||||
</div>
|
||||
<p>Stop fighting ORMs and complex SQL just to fetch the data you need. Instead try Super Graph: it automagically transforms GraphQL into efficient SQL.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
:::info cut development time
|
||||
80% of all web app development is either reading from or writing to a database. 100x your developer productivity and save valuable time by making that super simple.
|
||||
:::
|
||||
|
||||
### Fetching data with GraphQL
|
||||
|
||||
Just imagine the code or SQL you'll need to fetch this data: the user, all their posts, all the votes on the posts, the author's information and the related tags. Oh yeah, and you also need efficient cursor-based pagination. And remember you also need to maintain this code forever.
|
||||
|
||||
Instead just describe the data you need in GraphQL and give that to Super Graph; it'll automatically learn your database and generate the most efficient SQL query, fetching your data in the JSON structure you expected.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
user(id: 5) {
|
||||
id
|
||||
first_name
|
||||
last_name
|
||||
picture_url
|
||||
posts(first: 20, order_by: { score: desc }) {
|
||||
slug
|
||||
title
|
||||
created_at
|
||||
votes_total
|
||||
votes {
|
||||
created_at
|
||||
}
|
||||
author {
|
||||
id
|
||||
name
|
||||
}
|
||||
tags {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
posts_cursor
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Instant results
|
||||
|
||||
Here's the data Super Graph fetched using the GraphQL above; it's even in the JSON structure you wanted it in. All this without you writing any code or SQL.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"user": {
|
||||
"id": 5,
|
||||
"threads": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "This is a sample tite for this thread.",
|
||||
"topics": [
|
||||
{
|
||||
"id": 3,
|
||||
"name": "CloudRun"
|
||||
}
|
||||
]
|
||||
}]
|
||||
},
|
||||
"posts": [
|
||||
{
|
||||
"id": 1477,
|
||||
"body": "These are some example contents for this post.",
|
||||
"slug": "monitor-negotiate-store-1476",
|
||||
"votes": [],
|
||||
"author": {
|
||||
"id": 5,
|
||||
"email": "jordanecruickshank@ferry.io"
|
||||
},
|
||||
"created_at": "2020-05-13T13:51:21.729501+00:00"
|
||||
},
|
||||
...
|
||||
],
|
||||
"posts_cursor": "a8d4j2k9d83dy373hd2nskw2sjs8"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## How do I use it?
|
||||
|
||||
Super Graph can be used in two ways. You can run it as a standalone service serving as an API backend for your app or as a library within your own app code. Super Graph is built in Go, a secure and high-performance language from Google used to build cloud infrastructure.
|
||||
|
||||
### Using it as a service
|
||||
|
||||
```bash
|
||||
go get github.com/dosco/super-graph
|
||||
super-graph new <app_name>
|
||||
```
|
||||
|
||||
### Using it in your own code
|
||||
|
||||
```bash
|
||||
go get github.com/dosco/super-graph/core
|
||||
```
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
"github.com/dosco/super-graph/core"
|
||||
_ "github.com/jackc/pgx/v4/stdlib"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
sg, err := core.NewSuperGraph(nil, db)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
query := `
|
||||
query {
|
||||
posts {
|
||||
id
|
||||
title
|
||||
}
|
||||
}`
|
||||
|
||||
ctx := context.WithValue(context.Background(), core.UserIDKey, 1)
|
||||
|
||||
res, err := sg.GraphQL(ctx, query, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(res.Data))
|
||||
}
|
||||
```
|
||||
|
||||
## Follow us for updates
|
||||
|
||||
For when you need help or just want to stay in the loop
|
||||
[Twitter](https://twitter.com/dosco) or [Discord](https://discord.gg/6pSWCTZ).
|
||||
|
||||
## Why I created Super Graph
|
||||
|
||||
After working on several products throughout my career I found that we spend way too much time on building API backends. Most APIs also need constant updating, and this costs time and money.
|
||||
|
||||
It's always the same thing, figure out what the UI needs then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.
|
||||
|
||||
I didn't want to write this code anymore, I wanted the computer to do it. Enter GraphQL, to me it sounded great, but it still required me to write all the same database query code.
|
||||
|
||||
Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?
|
||||
|
||||
This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, rails integration, database migrations, and everything else needed for you to build production-ready apps with it.
|
||||
|
||||
## Apache License 2.0
|
||||
|
||||
Apache Public License 2.0 | Copyright © 2018-present Vikram Rangnekar
|
271
docs/website/docs/internals.md
Normal file
@ -0,0 +1,271 @@
|
||||
---
|
||||
id: internals
|
||||
title: Codebase Explained
|
||||
sidebar_label: Code Internals
|
||||
---
|
||||
|
||||
Super Graph code is made up of a number of packages. We have done our best to keep each package small and focused. Let us begin by looking at some of these packages.
|
||||
|
||||
1. qcode - GraphQL lexer and parser.
|
||||
2. psql - SQL generator
|
||||
3. serv - HTTP Endpoint, Configs, CLI, etc
|
||||
4. rails - Rails cookie and session store decoders
|
||||
|
||||
## QCODE
|
||||
|
||||
This package contains the core of the GraphQL compiler. It handles the lexing and parsing of the GraphQL query, transforming it into an internal representation called `QCode`.
|
||||
|
||||
This is the first step of the compiling process. The `func NewCompiler(c Config)` function creates a new instance of this compiler which has its own config.
|
||||
|
||||
Keep in mind QCode has no knowledge of the database structure; it is designed to be a fast GraphQL parser. Care is taken to keep memory allocations to a minimum.
|
||||
|
||||
```go
|
||||
const (
|
||||
opQuery
|
||||
opMutate
|
||||
...
|
||||
)
|
||||
|
||||
type QCode struct {
|
||||
Type QType
|
||||
Selects []Select
|
||||
...
|
||||
}
|
||||
|
||||
type Select struct {
|
||||
ID int32
|
||||
ParentID int32
|
||||
Args map[string]*Node
|
||||
Name string
|
||||
FieldName string
|
||||
Cols []Column
|
||||
Where *Exp
|
||||
OrderBy []*OrderBy
|
||||
DistinctOn []string
|
||||
Paging Paging
|
||||
Children []int32
|
||||
Functions bool
|
||||
Allowed map[string]struct{}
|
||||
PresetMap map[string]string
|
||||
PresetList []string
|
||||
}
|
||||
```
|
||||
|
||||
But before the incoming GraphQL query can be turned into QCode it must first be tokenized by the lexer `lex.go`. As the tokenizer walks the bytes of the query it generates tokens (`item` structs) which are then consumed by the next step, the parser `parse.go`.
|
||||
|
||||
```go
|
||||
type item struct {
|
||||
typ itemType
|
||||
pos Pos
|
||||
end Pos
|
||||
}
|
||||
```
|
||||
|
||||
For example a simple query like `query getUser { user { id } }` will be converted into several tokens like below.
|
||||
|
||||
```go
|
||||
item{itemQuery, 0, 4} // query
|
||||
item{itemName, 6, 12} // getUser
|
||||
item{itemObjOpen, 16, 20} // {
|
||||
...
|
||||
```
|
||||
|
||||
These tokens are then fed into the parser `parse.go`. The parser does the work of generating an abstract syntax tree (AST) from the tokens. This AST is an internal representation (data structure) and is not exposed outside the package. Since the AST is a tree, a stack `stack.go` is used to walk the tree and generate the QCode AST. The QCode data structure is also a tree (represented as an array). This is then returned to the caller of the compile function.
|
||||
|
||||
```go
|
||||
type Operation struct {
|
||||
Type parserType
|
||||
Name string
|
||||
Args []Arg
|
||||
Fields []Field
|
||||
}
|
||||
|
||||
type Field struct {
|
||||
ID int32
|
||||
ParentID int32
|
||||
Name string
|
||||
Alias string
|
||||
Args []Arg
|
||||
Children []int32
|
||||
}
|
||||
```
|
||||
|
||||
## PSQL
|
||||
|
||||
This package is responsible for generating Postgres SQL from the QCode AST. There are various GraphQL query types (Query, Mutation, etc) and several more sub-types like single-root or multi-root queries, and various types of mutations (insert, update, delete, bulk insert, etc). This package is designed to be able to generate SQL for all of those types.
|
||||
|
||||
In addition to QCode, variable data is also passed to the compile function within this package. Variables are decoded to derive what is being inserted and whether it is a single or bulk insert. This information is not available in the GraphQL query; it's passed in separately via variables. This package is able to put all this together and generate the right SQL code.
|
||||
|
||||
The entry point of this package is in `query.go`. The database schema must be passed in the config object when creating a new compiler instance with `NewCompiler`. The functions to extract this schema from the database are also part of this package, in `tables.go`. The `GetTables` function fetches all the tables from the database and `GetColumns` fetches columns and relationship information.
|
||||
|
||||
```go
|
||||
func NewCompiler(conf Config) *Compiler {
|
||||
return &Compiler{conf.Schema, conf.Vars}
|
||||
}
|
||||
|
||||
func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
|
||||
switch qc.Type {
|
||||
case qcode.QTQuery:
|
||||
return co.compileQuery(qc, w)
|
||||
case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
|
||||
return co.compileMutation(qc, w, vars)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
|
||||
}
|
||||
```
|
||||
|
||||
GraphQL: the input is first converted to QCode.
|
||||
|
||||
```graphql
|
||||
query {
|
||||
user {
|
||||
id
|
||||
}
|
||||
posts {
|
||||
title
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
SQL: in reality the generated SQL is far more complex since it has to be very efficient, leverage the power of Postgres, support RBAC (role based access control), and all of this must be done in a single SQL query.
|
||||
|
||||
```sql
|
||||
SELECT users.id, posts.title FROM users, posts;
|
||||
```
|
||||
|
||||
## SERV
|
||||
|
||||
The `serv` package contains most of the code that turns the above compiler into an HTTP service. It also includes authentication middleware, remote join resolvers, config parsing, database migrations and seeding commands.
|
||||
|
||||
Another big feature that this package handles is the `allow.list` management code: in production mode it parses the allow list file and registers prepared statements, while in development mode it adds GraphQL queries to this file.
|
||||
|
||||
Currently the following global variables are referenced across the package. In the future I'd prefer to move these into a context struct and pass that around instead.
|
||||
|
||||
```go
|
||||
var (
|
||||
logger zerolog.Logger // logger for everything but errors
|
||||
errlog zerolog.Logger // logger for errors includes line numbers
|
||||
conf *config // parsed config
|
||||
confPath string // path to the config file
|
||||
db *pgxpool.Pool // database connection pool
|
||||
schema *psql.DBSchema // database tables, columns and relationships
|
||||
qcompile *qcode.Compiler // qcode compiler
|
||||
pcompile *psql.Compiler // postgres sql compiler
|
||||
)
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
There are several unit tests and benchmark tests (`parse_test.go`) included. There are also scripts included for memory (`pprof_mem.sh`) and cpu (`pprof_cpu.sh`) profiling.
|
||||
|
||||
```go
|
||||
// Test to ensure synthetic tables generate the correct SQL
|
||||
func syntheticTables(t *testing.T) {
|
||||
gql := `query {
|
||||
me {
|
||||
email
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('me', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT ) AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql, nil, "user")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(resSQL) != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can run tests within each package or across the entire app. It is usually fastest to first write a test and then build the feature to satisfy it.
|
||||
|
||||
```bash
|
||||
go test -v ./...
|
||||
```
|
||||
|
||||
Memory profiling can help find where allocations are happening within the package code.
|
||||
|
||||
```bash
|
||||
$ cd ./psql
|
||||
$ ./pprof_mem.sh
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/psql
|
||||
BenchmarkCompile-8 52567 19401 ns/op 3918 B/op 61 allocs/op
|
||||
BenchmarkCompileParallel-8 219548 5684 ns/op 3938 B/op 61 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/psql 2.582s
|
||||
Type: alloc_space
|
||||
Time: Nov 29, 2019 at 11:59pm (EST)
|
||||
Entering interactive mode (type "help" for commands, "o" for options)
|
||||
(pprof) top
|
||||
Showing nodes accounting for 880.59MB, 80.63% of 1092.14MB total
|
||||
Dropped 33 nodes (cum <= 5.46MB)
|
||||
Showing top 10 nodes out of 35
|
||||
flat flat% sum% cum cum%
|
||||
22MB 2.01% 2.01% 903.57MB 82.73% github.com/dosco/super-graph/qcode.(*Compiler).Compile
|
||||
0 0% 2.01% 862.98MB 79.02% github.com/dosco/super-graph/psql.BenchmarkCompileParallel.func1
|
||||
0 0% 2.01% 862.98MB 79.02% testing.(*B).RunParallel.func1
|
||||
461.95MB 42.30% 44.31% 760.53MB 69.64% github.com/dosco/super-graph/qcode.(*Compiler).compileQuery
|
||||
396.63MB 36.32% 80.63% 396.63MB 36.32% github.com/dosco/super-graph/util.NewStack
|
||||
0 0% 80.63% 252.07MB 23.08% github.com/dosco/super-graph/qcode.(*Compiler).compileArgs
|
||||
0 0% 80.63% 228.15MB 20.89% testing.(*B).runN
|
||||
0 0% 80.63% 227.63MB 20.84% github.com/dosco/super-graph/psql.BenchmarkCompile
|
||||
0 0% 80.63% 227.63MB 20.84% testing.(*B).launch
|
||||
0 0% 80.63% 187.04MB 17.13% github.com/dosco/super-graph/psql.(*Compiler).Compile
|
||||
```
|
||||
|
||||
## Benchmarking
|
||||
|
||||
Most packages contain benchmark tests to ensure new features don't introduce a significant regression to performance.
|
||||
|
||||
```bash
|
||||
$ cd ./psql
|
||||
$ go test -v -run=xx -bench=.
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/psql
|
||||
BenchmarkCompile-8 60775 19076 ns/op 3919 B/op 61 allocs/op
|
||||
BenchmarkCompileParallel-8 207847 5172 ns/op 3937 B/op 61 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/psql 2.530s
|
||||
```
|
||||
|
||||
## Reach out
|
||||
|
||||
If you'd like me to explain other parts of the code please reach out over Twitter or Discord. I'll keep adding to this doc as I get time.
|
||||
|
||||
## Developing Super Graph
|
||||
|
||||
If you want to build and run Super Graph from code then the below commands will build the web UI and launch Super Graph in developer mode with a watcher to rebuild on code changes. The demo Rails app is also launched to make it easier to test changes.
|
||||
|
||||
```bash
|
||||
|
||||
# yarn install dependencies and build the web ui
|
||||
make build
|
||||
|
||||
# do this only the first time to set up the database
|
||||
docker-compose run rails_app rake db:create db:migrate db:seed
|
||||
|
||||
# start super graph in development mode with a change watcher
|
||||
docker-compose up
|
||||
|
||||
```
|
||||
|
||||
## Developing on the Super Graph UI
|
||||
|
||||
```bash
|
||||
# yarn is needed to build the web ui
|
||||
brew install yarn
|
||||
|
||||
# this is where the react app for the ui lives
|
||||
cd internals/serv/web
|
||||
|
||||
# launch it in development mode
|
||||
yarn start
|
||||
```
|
136
docs/website/docs/intro.md
Normal file
@ -0,0 +1,136 @@
|
||||
---
|
||||
id: intro
|
||||
title: Introduction
|
||||
sidebar_label: Introduction
|
||||
---
|
||||
|
||||
Super Graph is a service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more spending weeks or months writing backend API code. Just make the query you need and Super Graph will do the rest.
|
||||
|
||||
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Support for JWT tokens, DB migrations, seeding and a lot more.
|
||||
|
||||
## Features
|
||||
|
||||
- Role and Attribute based access control
|
||||
- Works with existing Ruby-On-Rails apps
|
||||
- Automatically learns database schemas and relationships
|
||||
- Full text search and aggregations
|
||||
- Rails authentication supported (Redis, Memcache, Cookie)
|
||||
- JWT tokens supported (Auth0, etc)
|
||||
- Join database with remote REST APIs
|
||||
- Highly optimized and fast Postgres SQL queries
|
||||
- GraphQL queries and mutations
|
||||
- A simple config file
|
||||
- High performance GO codebase
|
||||
- Tiny docker image and low memory requirements
|
||||
- Fuzz tested for security
|
||||
- Database migrations tool
|
||||
- Database seeding tool
|
||||
- Works with Postgres and Yugabyte DB
|
||||
- OpenCensus Support: Zipkin, Prometheus, X-Ray, Stackdriver
|
||||
|
||||
## Try the demo app
|
||||
|
||||
```bash
|
||||
# clone the repository
|
||||
git clone https://github.com/dosco/super-graph
|
||||
|
||||
# run db in background
|
||||
docker-compose up -d db
|
||||
|
||||
# see logs and wait until DB is really UP
|
||||
docker-compose logs db
|
||||
|
||||
# setup the demo rails app & database and run it
|
||||
docker-compose run rails_app rake db:create db:migrate db:seed
|
||||
|
||||
# run the demo
|
||||
docker-compose up
|
||||
|
||||
# signin to the demo app (user1@demo.com / 123456)
|
||||
open http://localhost:3000
|
||||
|
||||
# try the super graph web ui
|
||||
open http://localhost:8080
|
||||
```
|
||||
|
||||
:::note Docker?
|
||||
This demo requires `docker`. You can either install it using `brew` or from the docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
|
||||
:::
|
||||
|
||||
## GraphQL
|
||||
|
||||
We fully support queries and mutations. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
|
||||
|
||||
### GraphQL Query
|
||||
|
||||
```graphql
|
||||
query {
|
||||
users {
|
||||
id
|
||||
email
|
||||
picture: avatar
|
||||
password
|
||||
full_name
|
||||
products(limit: 2, where: { price: { gt: 10 } }) {
|
||||
id
|
||||
name
|
||||
description
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### JSON Result
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"users": [
|
||||
{
|
||||
"id": 1,
|
||||
"email": "odilia@west.info",
|
||||
"picture": "https://robohash.org/simur.png?size=300x300",
|
||||
"full_name": "Edwin Orn",
|
||||
"products": [
|
||||
{
|
||||
"id": 16,
|
||||
"name": "Sierra Nevada Style Ale",
|
||||
"description": "Belgian Abbey, 92 IBU, 4.7%, 17.4°Blg",
|
||||
"price": 16.47
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
:::note Set User ID
|
||||
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies etc. This can be set using the _HTTP Headers_ tab at the bottom of the web UI.
|
||||
:::
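
A quick sketch of doing the same thing from the command line; the query and user id are arbitrary examples, only the endpoint path and the `X-User-ID` header come from the docs above.

```bash
# Run a GraphQL query as user 4 against a local dev instance
curl http://localhost:8080/api/v1/graphql \
  -H "Content-Type: application/json" \
  -H "X-User-ID: 4" \
  -d '{"query": "query { users { id email } }"}'
```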
|
||||
|
||||
### GraphQL Mutation
|
||||
|
||||
In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
101 docs/website/docs/react.md Normal file
@ -0,0 +1,101 @@

---
id: react
title: ReactJS Examples
sidebar_label: ReactJS Examples
---

## Apollo Client

We recommend using the new [Apollo Client v3](https://www.apollographql.com/docs/react/v3.0-beta/) `@apollo/client`; this is the latest version of the Apollo GraphQL javascript client library. Previous versions of this library were called `apollo-boost`.

This library contains react hooks `useQuery`, `useMutation`, `useLazyQuery`, etc. that make it easy to add GraphQL queries to your React app.

```bash
npm install @apollo/client graphql
```

### Creating a client

```jsx title="App.js"
import React from "react";
import { ApolloClient, ApolloProvider } from "@apollo/client";

const client = new ApolloClient({ uri: `/api/v1/graphql` });
```

### Or a client with caching enabled

```jsx title="App.js"
import React from "react";
import { ApolloClient, ApolloProvider, InMemoryCache } from "@apollo/client";

// Enabling GraphQL caching really speeds up your app
const cache = new InMemoryCache({
  freezeResults: true,

  // Set `dataIdFromObject` if your object id field is not called `id`
  // dataIdFromObject: obj => { return obj.slug }
});

const client = new ApolloClient({ uri: `/api/v1/graphql`, cache: cache });
```

### Use the client in components to query for data

In this example we create a component `UserProfile` that fetches a user's name by their `id` and displays it on the page.

```jsx
import React from "react";
import { gql, useQuery } from "@apollo/client";

const UserProfile = ({ id }) => {
  const query = gql`
    query getUser {
      user(id: $id) {
        id
        name
      }
    }`;

  const variables = { id };
  const { error, loading, data } = useQuery(query, { variables });

  if (loading) {
    return <p>Loading</p>;
  }

  // the query returns a single `user` object
  return <h1>{data.user.name}</h1>;
};
```

### Use the client in components to post data back

In this example we create a component `UpdateUserProfile` that displays a text input; when it changes, a mutation is triggered to update the user's name in the backend database. You can also insert or delete in a mutation. More complex mutations like bulk inserts, or nested inserts and updates, are also supported. Nested inserts are great for creating an entry and creating or updating a related entity in the same request.

```jsx
import React from "react";
import { gql, useMutation } from "@apollo/client";

const UpdateUserProfile = ({ id }) => {
  const query = gql`
    mutation setUserName {
      user(id: $id, update: $data) {
        id
        name
      }
    }`;

  const [setUserName] = useMutation(query);

  const updateName = (e) => {
    let name = e.target.value;
    let variables = { id, data: { name } };

    setUserName({ variables });
  };

  return <input type="text" onChange={updateName} />;
};
```

217 docs/website/docs/security.md Normal file
@ -0,0 +1,217 @@

---
id: security
title: API Security
sidebar_label: API Security
---

One of the most common questions I get asked is what happens if a user out on the internet sends queries
that we don't want run. For example, how do we stop them from fetching all users or the emails of users? Our answer is that this is not an issue as it simply cannot happen, let me explain.

Super Graph runs in one of two modes, `development` or `production`; this is controlled via the config value `production: false`. When it's false it's running in development mode and when true, production. In development mode all the **named** queries (including mutations) are saved to the allow list `./config/allow.list`. In production mode, when Super Graph starts, only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks).

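As a minimal sketch, switching to production mode is just a matter of flipping that one config value in the config file you deploy with (the rest of your settings live alongside it):

```yaml
# prod config (sketch) — only allow-listed queries will be served
production: true
```
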
Prepared statements are designed by databases to be fast and secure. They protect against all kinds of SQL injection attacks and since they are pre-processed and pre-planned they are much faster to run than raw SQL queries. Also there's no GraphQL to SQL compiling happening in production mode, which makes your queries lightning fast as they are sent directly to the database with almost no overhead.

In short, in production only queries listed in the allow list file `./config/allow.list` can be used; all other queries will be blocked.

:::tip How to think about the allow list?
The allow list file is essentially a list of all your exposed API calls and the data that passes within them. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked. It's a great way to build compliance tooling and ensure your user data is always safe.
:::

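As a sketch of that kind of tooling, here's a minimal shell check you could run in CI (it assumes the default `./config/allow.list` path and a hypothetical `credit_card_no` column):

```bash
# fail the build if a sensitive field ever shows up in the allow list
if grep -q "credit_card_no" config/allow.list; then
  echo "sensitive field found in allow.list" >&2
  exit 1
fi
```
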
This is an example of a named query; `getUserWithProducts` is the name you've given to this query. It can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode.

```graphql
query getUserWithProducts {
  users {
    id
    name
    products {
      id
      name
      price
    }
  }
}
```

## Authentication

You can only have one type of auth enabled, either Rails or JWT.

### Ruby on Rails

Almost all Rails apps use Devise or Warden for authentication. Once the user is
authenticated a session is created with the user's ID. The session can either be
stored in the user's browser as a cookie, or in memcache or redis. If memcache or redis is used then a cookie is set in the user's browser with just the session id.

Super Graph can handle all these variations including the old and new session formats. Just enable the right `auth` config based on how your rails app is configured.

#### Cookie session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Rails version this is used for reading the
    # various cookie formats.
    version: 5.2

    # Found in 'Rails.application.config.secret_key_base'
    secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
```

#### Memcache session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Memcache remote cookie store.
    url: memcache://127.0.0.1
```

#### Redis session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Redis remote cookie store
    url: redis://127.0.0.1:6379
    password: ""
    max_idle: 80
    max_active: 12000
```

### JWT Tokens

```yaml
auth:
  type: jwt

  jwt:
    # the two providers are 'auth0' and 'none'
    provider: auth0
    secret: abc335bfcfdb04e50db5bb0a4d67ab9
    public_key_file: /secrets/public_key.pem
    public_key_type: ecdsa #rsa
```

For JWT tokens we currently support tokens from a provider like Auth0, or if you have a custom solution we look for the `user_id` in the `subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token, `user_id` and `user_id_provider`, to use in your filters.

We can get the JWT token either from the `authorization` header, where we expect it to be a `bearer` token, or if `cookie` is specified then we look there.

For validation a `secret` or a public key (ecdsa or rsa) is required. When using public keys they have to be in a PEM format file.

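For example, a request carrying the token in the `authorization` header might look like this (a sketch; the token value is a placeholder):

```bash
curl 'http://localhost:8080/api/v1/graphql' \
  -H 'content-type: application/json' \
  -H 'Authorization: Bearer <your-jwt-token>' \
  --data-binary '{"query":"{ users { id email } }"}'
```
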
### HTTP Headers

```yaml
header:
  name: X-AppEngine-QueueName
  exists: true
  #value: default
```

Header auth is usually the best option to authenticate requests to the action endpoints. For example, you
might want to use an action to refresh a materialized view every hour and only want a cron service like the Google AppEngine Cron service to make that request; in this case a config similar to the one above will do.

The `exists: true` parameter ensures that only the existence of the header is checked, not its value. The `value` parameter lets you confirm that the value matches the one assigned to the parameter. This helps in the case you are using a shared secret to protect the endpoint.

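A shared-secret variant might look like this (a sketch; the header name and value are placeholders you would pick yourself):

```yaml
header:
  name: X-Api-Secret
  value: my-shared-secret
```
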
### Named Auth

```yaml
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header on all its requests
auths:
  - name: from_taskqueue
    type: header
    header:
      name: X-Appengine-Cron
      exists: true
```

In addition to the default auth configuration you can create additional named auth configurations to be used
with features like `actions`. For example, while your main GraphQL endpoint uses JWT for authentication you may want to use a header value to ensure your actions can only be called by clients having access to a shared secret
or security header.

## Actions

Actions is a very useful feature that is currently a work in progress. For now the best use case for actions is to
refresh database tables like materialized views or call a database procedure to refresh a cache table, etc. An action creates an HTTP endpoint that anyone can call to have the SQL query executed. The below example will create an endpoint `/api/v1/actions/refresh_leaderboard_users`; any request sent to that endpoint will cause the SQL query to be executed. The `auth_name` points to a named auth that should be used to secure this endpoint. In the future we have big plans to allow your own custom code to run using actions.

```yaml
actions:
  - name: refresh_leaderboard_users
    sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
    auth_name: from_taskqueue
```

#### Using CURL to test a query

```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
  -H 'content-type: application/json' \
  -H 'X-User-ID: 5' \
  --data-binary '{"query":"{ products { name price users { email }}}"}'
```

## Access Control

It's common for APIs to control what information they return or insert based on the role of the user. In Super Graph we have two primary roles, `user` and `anon`: the first for users where a `user_id` is available, the latter for users where it's not.

:::info Define authenticated request?
An authenticated request is one where Super Graph can extract a `user_id` based on the configured authentication method (jwt, rails cookies, etc).
:::

The `user` role can be divided up into further roles based on attributes in the database. For example, when fetching a list of users, a normal user can only fetch their own entry while an admin can fetch all the users within a company and an admin user can fetch everyone. In some places this is called attribute-based access control, so in a way we support both role-based and attribute-based access control.

Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.

### Configure RBAC

```yaml
roles_query: "SELECT * FROM users WHERE users.id = $user_id"

roles:
  - name: user
    tables:
      - name: users
        query:
          filters: ["{ id: { _eq: $user_id } }"]

        insert:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns: ["id", "name", "description"]
          presets:
            - created_at: "now"

        update:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns:
            - id
            - name
          presets:
            - updated_at: "now"

        delete:
          block: true

  - name: admin
    match: users.id = 1
    tables:
      - name: users
        query:
          filters: []
```

This configuration is relatively simple to follow. The `roles_query` parameter is the query that must be run to help figure out a user's role. This query can be as complex as you like and include joins with other tables.

The individual roles are defined under the `roles` parameter and this includes each table the role has a custom setting for. The role is dynamically matched using the `match` parameter; for example, in the above case `users.id = 1` means that when the `roles_query` is executed a user with the id `1` will be assigned the admin role, and those that don't match get the `user` role if authenticated successfully or the `anon` role.

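As a sketch, an additional attribute-based role can be added the same way; the `manager` role and the `users.manager` column below are hypothetical and only illustrate how `match` keys off a database attribute:

```yaml
  - name: manager
    match: users.manager = true
    tables:
      - name: users
        query:
          filters: []
```
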
249 docs/website/docs/seed.md Normal file
@ -0,0 +1,249 @@

---
id: seed
title: Database Seeding
sidebar_label: Seed Scripts
---

While developing it's often useful to have fake data available in the database. Fake data can help with building the UI and save you time when trying to get the GraphQL query correct. Super Graph has the ability to do this for you. All you have to do is write a seed script `config/seed.js` (in Javascript) and use the `db:seed` command line option. Below is an example of the kind of things you can do in a seed script.

## Creating fake users

Since all mutations and queries are in standard GraphQL you can use all the features available in Super Graph GraphQL.

```javascript
var users = [];

for (i = 0; i < 20; i++) {
  var data = {
    slug: util.make_slug(fake.first_name() + "-" + fake.last_name()),
    first_name: fake.first_name(),
    last_name: fake.last_name(),
    picture_url: fake.avatar_url(),
    email: fake.email(),
    bio: fake.sentence(10),
  };

  var res = graphql(" \
  mutation { \
    user(insert: $data) { \
      id \
    } \
  }", { data: data });

  users.push(res.user);
}
```

## Inserting the users' fake blog posts

Another example highlighting how the `connect` syntax of Super Graph GraphQL can be used to connect inserted posts
to random users that were previously created. For further details check out the [seed script](/seed) documentation.

```javascript
var posts = [];

for (i = 0; i < 1500; i++) {
  // pick a random user created earlier
  var user = users[Math.floor(Math.random() * 10)];

  var data = {
    slug: util.make_slug(fake.sentence(3) + i),
    body: fake.sentence(100),
    published: true,
    thread: {
      connect: { user: user.id }
    }
  }

  var res = graphql(" \
  mutation { \
    post(insert: $data) { \
      id \
    } \
  }",
  { data: data },
  { user_id: user.id })

  posts.push(res.post.slug)
}
```

## Insert a large number of rows efficiently

This feature uses the `COPY` functionality available in Postgres; this is the best way to
insert a large number of rows into a table. The `import_csv` function reads in a CSV file using the first
line of the file as column names.

```javascript
import_csv("post_tags", "./tags.csv");
```

## A list of fake data functions available to you

```
person
name
name_prefix
name_suffix
first_name
last_name
gender
ssn
contact
email
phone
phone_formatted
username
password

// Address
address
city
country
country_abr
state
state_abr
street
street_name
street_number
street_prefix
street_suffix
zip
latitude
latitude_in_range
longitude
longitude_in_range

// Beer
beer_alcohol
beer_hop
beer_ibu
beer_blg
beer_malt
beer_name
beer_style
beer_yeast

// Cars
car
car_type
car_maker
car_model

// Text
word
sentence
paragraph
question
quote

// Misc
generate
boolean
uuid

// Colors
color
hex_color
rgb_color
safe_color

// Internet
url
image_url
avatar_url
domain_name
domain_suffix
ipv4_address
ipv6_address
http_method
user_agent
user_agent_firefox
user_agent_chrome
user_agent_opera
user_agent_safari

// Date / Time
date
date_range
nano_second
second
minute
hour
month
day
weekday
year
timezone
timezone_abv
timezone_full
timezone_offset

// Payment
price
credit_card
credit_card_cvv
credit_card_number
credit_card_type
currency
currency_long
currency_short

// Company
bs
buzzword
company
company_suffix
job
job_description
job_level
job_title

// Hacker
hacker_abbreviation
hacker_adjective
hacker_noun
hacker_phrase
hacker_verb

// Hipster
hipster_word
hipster_paragraph
hipster_sentence

// File
file_extension
file_mine_type

// Numbers
number
numerify
int8
int16
int32
int64
uint8
uint16
uint32
uint64
float32
float32_range
float64
float64_range
shuffle_ints
mac_address

// String
digit
letter
lexify
rand_string
numerify
```

## Some more utility functions

```
shuffle_strings(string_array)
make_slug(text)
make_slug_lang(text, lang)
```

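A small sketch of how these helpers can be combined inside a seed script, following the signatures listed above (elsewhere in these docs the same helpers also appear namespaced under `util.` and `fake.`):

```javascript
// build a random title, a shuffled tag list and a slug derived from the title
var title = fake.sentence(3);
var tags = shuffle_strings(["go", "graphql", "postgres"]);
var slug = make_slug(title);
```
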
170 docs/website/docs/start.md Normal file
@ -0,0 +1,170 @@

---
id: start
title: Getting Started
sidebar_label: Getting Started
---

Super Graph can generate your initial app for you. The generated app will have config files, database migrations and seed files among other things like docker related files.

You can then add your database schema to the migrations, maybe create some seed data using the seed script and launch Super Graph. You're now good to go and can start working on your UI frontend in React, Vue or whatever.

```bash
# Download and install Super Graph. You will need Go 1.14 or above
go get github.com/dosco/super-graph
```

And then create and launch your new app

```bash
# Create a new app and change to its directory
super-graph new blog

cd blog

# Setup the app database and seed it with fake data.
# Docker compose will start a Postgres database for your app
docker-compose run blog_api ./super-graph db:setup

# Finally launch Super Graph configured for your app
docker-compose up
```

Let's take a look at the files generated by Super Graph when you create a new app

```bash
super-graph new blog

> created 'blog'
> created 'blog/Dockerfile'
> created 'blog/docker-compose.yml'
> created 'blog/config'
> created 'blog/config/dev.yml'
> created 'blog/config/prod.yml'
> created 'blog/config/seed.js'
> created 'blog/config/migrations'
> created 'blog/config/migrations/100_init.sql'
> app 'blog' initialized
```

:::note Docker
Docker Compose is a great way to run multiple services while developing on your desktop or laptop. In our case we need Postgres and Super Graph to both be running and the `docker-compose.yml` is configured to do just that. The Super Graph service is named after your app, postfixed with `_api`. The Dockerfile can be used to build a container of your app for production deployment.
:::

Run Super Graph with Docker compose

```bash
docker-compose run blog_api ./super-graph help
```

### Config files

All the config files needed to configure Super Graph for your app are contained in this folder; for starters you have two, `dev.yml` and `prod.yml`. When the `GO_ENV` environment variable is set to `development` then `dev.yml` is used and the prod one when it's set to `production`. Stage and Test are the other two environment options, but you can set `GO_ENV` to whatever you like (eg. `alpha-test`) and Super Graph will look for a yaml file with that name to load config from.

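For example, to run against the production config you could do something like this (a sketch, assuming the `super-graph` binary is in the current directory):

```bash
# uses prod.yml because GO_ENV is set to production
GO_ENV=production ./super-graph serv
```
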
### Seed.js

Having data flowing through your API makes building your frontend UI so much easier. When crafting, say, a user profile, wouldn't it be nice for the API to return a fake user with name, picture and all? This is why having the ability to seed your database is important. Seeding can also be used in production to set up some initial users like the admins or to add an initial set of products to an ecommerce store.

Super Graph makes this easy by allowing you to write your seeding script in plain old Javascript. The below script, auto-generated for new apps, uses our built-in functions `fake` and `graphql` to generate fake data and use GraphQL mutations to insert it into the database.

```javascript
// Example script to seed database

var users = [];

for (i = 0; i < 10; i++) {
  var data = {
    full_name: fake.name(),
    email: fake.email(),
  };

  var res = graphql(" \
  mutation { \
    user(insert: $data) { \
      id \
    } \
  }", { data: data });

  users.push(res.user);
}
```

If you want to import a lot of data, using a CSV file is the best and fastest option. The `import_csv` command uses the `COPY FROM` Postgres method to load massive amounts of data into tables. The first line of the CSV file must be the header with column names.

```javascript
var post_count = import_csv("posts", "posts.csv");
```

You can generate the following fake data for your seeding purposes. Below is the list of fake data functions supported by the built-in fake data library. For example `fake.image_url()` will generate a fake image url, `fake.shuffle_strings(['hello', 'world', 'cool'])` will generate a randomly shuffled version of that array of strings, and `fake.rand_string(['hello', 'world', 'cool'])` will return a random string from the array provided.

### Migrations

Easy database migrations are the most important thing when building products backed by a relational database. We make it super easy to manage and migrate your database.

```bash
super-graph db:new create_users
> created migration 'config/migrations/101_create_users.sql'
```

Migrations in Super Graph are plain old Postgres SQL. Here's an example for the above migration.

```sql
-- Write your migrate up statements here

CREATE TABLE public.users (
  id bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
  full_name text,
  email text UNIQUE NOT NULL CHECK (length(email) < 255),
  created_at timestamptz NOT NULL DEFAULT NOW(),
  updated_at timestamptz NOT NULL DEFAULT NOW()
);

---- create above / drop below ----

-- Write your down migrate statements here. If this migration is irreversible
-- then delete the separator line above.

DROP TABLE public.users
```

We would encourage you to leverage triggers to maintain consistency of your data; for example, here are a couple of triggers that you can add to your init migration and across your tables.

```sql
-- This trigger script will set the updated_at column every time a row is updated
CREATE OR REPLACE FUNCTION trigger_set_updated_at()
RETURNS TRIGGER SET SCHEMA 'public' LANGUAGE 'plpgsql' AS $$
BEGIN
  new.updated_at = now();
  RETURN new;
END;
$$;

...

-- An example of adding this trigger to the users table
CREATE TRIGGER set_updated_at BEFORE UPDATE ON public.users
  FOR EACH ROW EXECUTE PROCEDURE trigger_set_updated_at();
```

```sql
-- This trigger script will set the user_id column to the current
-- Super Graph user.id value every time a row is created or updated
CREATE OR REPLACE FUNCTION trigger_set_user_id()
RETURNS TRIGGER SET SCHEMA 'public' LANGUAGE 'plpgsql' AS $$
BEGIN
  IF TG_OP = 'UPDATE' THEN
    new.user_id = old.user_id;
  ELSE
    new.user_id = current_setting('user.id')::int;
  END IF;

  RETURN new;
END;
$$;

...

-- An example of adding this trigger to the blog_posts table
CREATE TRIGGER set_user_id BEFORE INSERT OR UPDATE ON public.blog_posts
  FOR EACH ROW EXECUTE PROCEDURE trigger_set_user_id();
```

82 docs/website/docs/telemetry.md Normal file
@ -0,0 +1,82 @@

---
id: telemetry
title: Tracing and Metrics
sidebar_label: Telemetry
---

import useBaseUrl from '@docusaurus/useBaseUrl'; // Add to the top of the file below the front matter.

Having observability and telemetry is at the core of any production ready service. Super Graph has built-in support for OpenCensus for tracing requests all the way from HTTP to the database and providing all kinds of metrics.

OpenCensus has a concept called exporters; these are external services that can consume this data to give you graphs, charts, alerting etc. Super Graph has built-in support for the Zipkin, Prometheus, Google Stackdriver and AWS X-Ray exporters.

## Telemetry config

The `telemetry` section of the standard config files is where you set values to configure this feature to your needs.

```yaml
telemetry:
  debug: true
  interval: 5s
  metrics:
    exporter: "prometheus"
    endpoint: ""
    namespace: "web api"
    key: "1234xyz"
  tracing:
    exporter: "zipkin"
    endpoint: "http://zipkin:9411/api/v2/spans"
    sample: 0.2
    include_query: false
    include_params: false
```

**debug**: Enabling debug enables an embedded web UI to test and debug tracing and metrics. This UI, called `zPages`, is provided by OpenCensus and will be made available on the `/telemetry` path. For more information on using `zPages` see https://opencensus.io/zpages/. Remember to disable this in production.

**interval**: This controls the interval setting for OpenCensus metrics collection. This defaults to `5 seconds` if not set.

**metric.exporter**: Setting this enables metrics collection. The supported values for this field are `prometheus` and `stackdriver`. The Prometheus exporter requires `metric.namespace` to be set. The Stackdriver exporter requires `metric.key` to be set to the Google Cloud Project ID.

**metric.endpoint**: Is not currently used by any of the exporters.

**tracing.exporter**: Setting this enables request tracing. The supported values for this field are `zipkin`, `aws` and `xray`. Zipkin requires `tracing.endpoint` to be set. AWS and Xray are the same and do not require any additional settings.

**tracing.sample**: This controls what percentage of the requests should be traced. By default `0.5`, or 50% of the requests, are traced; `always` is also a valid value for this field and means all requests will be traced.

**include_query**: Include the Super Graph SQL query in the trace. Be careful with this setting in production as it will add the entire SQL query to the trace. This can be very useful to debug slow requests.

**include_params**: Include the Super Graph SQL query parameters in the trace. Be careful with this setting in production as it can potentially leak sensitive user information into tracing logs.

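Putting a few of these fields together, a local-debugging sketch might look like the following (adjust the endpoint for your own setup, and avoid `include_query` in production):

```yaml
telemetry:
  debug: true
  tracing:
    exporter: "zipkin"
    endpoint: "http://localhost:9411/api/v2/spans"
    sample: always
    include_query: true
```
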
## Using Zipkin

Zipkin is a really great open source request tracing project. It's easy to add to your current Super Graph app as a way to test tracing in development. Add the following to the Super Graph generated `docker-compose.yml` file. Also add `zipkin` to your current app's `depends_on` list. Once set up, the Zipkin UI is available at http://localhost:9411

```yaml
your_api:
  ...
  depends_on:
    - db
    - zipkin

zipkin:
  image: openzipkin/zipkin-slim
  container_name: zipkin
  # Environment settings are defined here https://github.com/openzipkin/zipkin/blob/master/zipkin-server/README.md#environment-variables
  environment:
    - STORAGE_TYPE=mem
    # Uncomment to enable self-tracing
    # - SELF_TRACING_ENABLED=true
    # Uncomment to enable debug logging
    # - JAVA_OPTS=-Dorg.slf4j.simpleLogger.log.zipkin2=debug
  ports:
    # Port used for the Zipkin UI and HTTP API
    - 9411:9411
```

### Zipkin HTTP to DB traces

<img alt="Zipkin Traces" src={useBaseUrl("img/zipkin1.png")} />

### Zipkin trace details

<img alt="Zipkin Traces" src={useBaseUrl('img/zipkin2.png')} />

81 docs/website/docs/why.md Normal file
@ -0,0 +1,81 @@

---
id: why
title: Why use Super Graph?
sidebar_label: Why Super Graph
---

Let's take a simple example: say you want to fetch 5 products priced over 12 dollars along with the photos of the products and the users that own them. Additionally, also fetch the last 10 of your own purchases along with the name and ID of the product you purchased. This is a common type of query to render a view in, say, an ecommerce app. Let's be honest, it's not very exciting to write and maintain. Keep in mind the data needed will only continue to grow and change as your app evolves. Developers might find that most ORMs will not be able to do all of this in a single SQL query and will require n+1 queries to fetch all the data and assemble it into the right JSON response.

What if I told you Super Graph will fetch all this data with a single SQL query and without you having to write a single line of code? Also, as your app evolves, feel free to evolve the query as you like. In our experience Super Graph saves us hundreds or thousands of man hours that we can put towards the more exciting parts of our app.

#### GraphQL Query

```graphql
query {
  products(limit: 5, where: { price: { gt: 12 } }) {
    id
    name
    description
    price
    photos {
      url
    }
    user {
      id
      email
      picture: avatar
      full_name
    }
  }
  purchases(
    limit: 10
    order_by: { created_at: desc }
    where: { user_id: { eq: $user_id } }
  ) {
    id
    created_at
    product {
      id
      name
    }
  }
}
```

#### JSON Result

```json
{
  "data": {
    "products": [
      {
        "id": 1,
        "name": "Oaked Arrogant Bastard Ale",
        "description": "Coors lite, European Amber Lager, Perle, 1272 - American Ale II, 38 IBU, 6.4%, 9.7°Blg",
        "price": 20,
        "photos": [{
          "url": "https://www.scienceworld.ca/wp-content/uploads/science-world-beer-flavours.jpg"
        }],
        "user": {
          "id": 1,
          "email": "user0@demo.com",
          "picture": "https://robohash.org/sitaliquamquaerat.png?size=300x300&set=set1",
          "full_name": "Mrs. Wilhemina Hilpert"
        }
      },
      ...
    ],
    "purchases": [
      {
        "id": 5,
        "created_at": "2020-01-24T05:34:39.880599",
        "product": {
          "id": 45,
          "name": "Brooklyn Black"
        }
      },
      ...
    ]
  }
}
```
