Compare commits

20 Commits

| SHA1 |
| --- |
| 3bf9f02a9f |
| 533c767e1d |
| 84d55dbc8a |
| 5aafff6310 |
| 840aaf64ff |
| 7bbb56a328 |
| 394b08b2fe |
| 842252f9e2 |
| 279f5616d1 |
| 04bb88f74b |
| 38ed6dbc5f |
| ec2f8d0c58 |
| 9b51065414 |
| 1a70603b1a |
| 505335d872 |
| bdc8c65a09 |
| 03fe29b088 |
| 5857efdd70 |
| bdffe7b14e |
| ae7cde0433 |
@@ -5,18 +5,18 @@ info:
  repository_url: https://github.com/dosco/super-graph
options:
  commits:
    # filters:
    #   Type:
    #     - feat
    #     - fix
    #     - perf
    #     - refactor
    filters:
      Type:
        - feat
        - fix
        - perf
        - refactor
  commit_groups:
    # title_maps:
    #   feat: Features
    #   fix: Bug Fixes
    #   perf: Performance Improvements
    #   refactor: Code Refactoring
    title_maps:
      feat: Features
      fix: Bug Fixes
      perf: Performance Improvements
      refactor: Code Refactoring
  header:
    pattern: "^((\\w+)\\s.*)$"
    pattern_maps:
.deepsource.toml | 8 (new file)

@@ -0,0 +1,8 @@
version = 1

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_path = "github.com/dosco/super-graph"
.gitignore | 3 (vendored)

@@ -23,6 +23,8 @@
/tmp/runner-build
/demo/tmp

.idea
*.iml
.vscode
.DS_Store
.swp
@@ -35,4 +37,5 @@ suppressions
release
.gofuzz
*-fuzz.zip
*.test
CHANGELOG.md | 570

@@ -1,401 +1,371 @@
<a name="unreleased"></a>
## [Unreleased]

### Add
- Add config driven custom table relationships
- Add support for `websearch_to_tsquery` in PG 11

### Create
- Create CODE_OF_CONDUCT.md

<a name="v0.13.22"></a>
## [v0.13.22] - 2020-05-01

### Fix
- Fix bug with remote join example
- Fix grammer / syntax

<a name="v0.13.21"></a>
## [v0.13.21] - 2020-04-24

### Update
- Update issue templates
- Update CONTRIBUTING.md
- Update issue templates
- Update feature_request.md

<a name="v0.13.20"></a>
## [v0.13.20] - 2020-04-24

<a name="v0.13.19"></a>
## [v0.13.19] - 2020-04-23

<a name="v0.13.18"></a>
## [v0.13.18] - 2020-04-23

<a name="v0.13.17"></a>
## [v0.13.17] - 2020-04-22

<a name="v0.13.16"></a>
## [v0.13.16] - 2020-04-21

### Features
- feat : improve the generated introspection schema and avoid the chirino/graphql api leaking through the core api. ([#53](https://github.com/dosco/super-graph/issues/53))

<a name="v0.13.15"></a>
## [v0.13.15] - 2020-04-20

<a name="v0.13.14"></a>
## [v0.13.14] - 2020-04-19

<a name="v0.13.13"></a>
## [v0.13.13] - 2020-04-19

<a name="v0.13.12"></a>
## [v0.13.12] - 2020-04-19

<a name="v0.13.11"></a>
## [v0.13.11] - 2020-04-18

<a name="v0.13.10"></a>
## [v0.13.10] - 2020-04-17

<a name="v0.13.9"></a>
## [v0.13.9] - 2020-04-16

<a name="v0.13.8"></a>
## [v0.13.8] - 2020-04-16

<a name="v0.13.7"></a>
## [v0.13.7] - 2020-04-16

<a name="v0.13.6"></a>
## [v0.13.6] - 2020-04-13

<a name="v0.13.5"></a>
## [v0.13.5] - 2020-04-13

<a name="v0.13.4"></a>
## [v0.13.4] - 2020-04-12

<a name="v0.13.3"></a>
## [v0.13.3] - 2020-04-12

<a name="v0.13.2"></a>
## [v0.13.2] - 2020-04-11

<a name="v0.13.1"></a>
## [v0.13.1] - 2020-04-11

<a name="v0.13.0"></a>
## [v0.13.0] - 2020-04-10

<a name="v0.12.49"></a>
## [v0.12.49] - 2020-04-01

<a name="v0.12.48"></a>
## [v0.12.48] - 2020-03-31

<a name="v0.12.47"></a>
## [v0.12.47] - 2020-03-30

<a name="v0.12.46"></a>
## [v0.12.46] - 2020-03-21

<a name="v0.12.45"></a>
## [v0.12.45] - 2020-03-18

<a name="v0.12.44"></a>
## [v0.12.44] - 2020-03-16

<a name="v0.12.43"></a>
## [v0.12.43] - 2020-03-16

<a name="v0.12.42"></a>
## [v0.12.42] - 2020-03-14

<a name="v0.12.41"></a>
## [v0.12.41] - 2020-03-06

<a name="v0.12.40"></a>
## [v0.12.40] - 2020-03-06

<a name="v0.12.39"></a>
## [v0.12.39] - 2020-03-06

<a name="v0.12.38"></a>
## [v0.12.38] - 2020-03-05

<a name="v0.12.37"></a>
## [v0.12.37] - 2020-03-04

<a name="v0.12.36"></a>
## [v0.12.36] - 2020-03-04

<a name="v0.12.35"></a>
## [v0.12.35] - 2020-03-03

<a name="v0.12.34"></a>
## [v0.12.34] - 2020-03-03

<a name="v0.12.33"></a>
## [v0.12.33] - 2020-02-29

<a name="v0.12.32"></a>
## [v0.12.32] - 2020-02-24

### Bug Fixes
- fix "Try the demo app" in docs ([#38](https://github.com/dosco/super-graph/issues/38))

<a name="v0.12.31"></a>
## [v0.12.31] - 2020-02-23

<a name="v0.12.30"></a>
## [v0.12.30] - 2020-02-23

<a name="v0.12.29"></a>
## [v0.12.29] - 2020-02-21

<a name="v0.12.28"></a>
## [v0.12.28] - 2020-02-20

<a name="v0.12.27"></a>
## [v0.12.27] - 2020-02-19

<a name="v0.12.26"></a>
## [v0.12.26] - 2020-02-11

<a name="v0.12.25"></a>
## [v0.12.25] - 2020-02-10

<a name="v0.12.24"></a>
## [v0.12.24] - 2020-02-03

<a name="v0.12.23"></a>
## [v0.12.23] - 2020-02-02

<a name="v0.12.22"></a>
## [v0.12.22] - 2020-02-01

<a name="v0.12.21"></a>
## [v0.12.21] - 2020-01-31

<a name="v0.12.20"></a>
## [v0.12.20] - 2020-01-28

<a name="v0.12.19"></a>
## [v0.12.19] - 2020-01-26

<a name="v0.12.18"></a>
## [v0.12.18] - 2020-01-20

<a name="v0.12.17"></a>
## [v0.12.17] - 2020-01-20

<a name="v0.12.16"></a>
## [v0.12.16] - 2020-01-19

<a name="v0.12.15"></a>
## [v0.12.15] - 2020-01-17

<a name="v0.12.14"></a>
## [v0.12.14] - 2020-01-17

<a name="v0.12.13"></a>
## [v0.12.13] - 2020-01-16

<a name="v0.12.12"></a>
## [v0.12.12] - 2020-01-15

<a name="v0.12.11"></a>
## [v0.12.11] - 2020-01-14

<a name="v0.12.10"></a>
## [v0.12.10] - 2020-01-14

<a name="v0.12.9"></a>
## [v0.12.9] - 2020-01-14

<a name="v0.12.8"></a>
## [v0.12.8] - 2020-01-13

<a name="v0.12.7"></a>
## [v0.12.7] - 2020-01-11

### Pull Requests
- Merge pull request [#22](https://github.com/dosco/super-graph/issues/22) from bhaskarmurthy/fix-grammer-syntax

<a name="v0.12.6"></a>
## [v0.12.6] - 2019-12-02

### Add
- Add support for `websearch_to_tsquery` in PG 11

<a name="v0.12.5"></a>
## [v0.12.5] - 2019-11-30

### Add
- Add a guide to the internals of the codebase
- Add a CONTRIBUTING.md guide for contributors
- Add a CHANGLOG.md
- Add issue templates

### Fix
- Fix for missing filters on nested selectors

### Refactor
- Refactor rename 'Select.Table` to `Select.Name`

<a name="v0.12.4"></a>
## [v0.12.4] - 2019-11-28

### Move
- Move license from MIT to Apache 2.0. Add Makefile

<a name="v0.12.3"></a>
## [v0.12.3] - 2019-11-26

### Added
- Added support for query names to the allow.list

<a name="v0.12.2"></a>
## [v0.12.2] - 2019-11-25

### Fix
- Fix bug with compiling anon queries

<a name="v0.12.1"></a>
## [v0.12.1] - 2019-11-22

### Move
- Move sql query logging from info to debug

<a name="v0.12.0"></a>
## [v0.12.0] - 2019-11-22

### Use
- Use logger error instead of panic in goja handlers

<a name="v0.11.9"></a>
## [v0.11.9] - 2019-11-22

### Add
- Add a db:reset command only for dev mode

<a name="v0.11.8"></a>
## [v0.11.8] - 2019-11-21

### Optimize
- Optimize db queries limit use of transactions

<a name="v0.11.7"></a>
## [v0.11.7] - 2019-11-19

### Added
- Added support for multi-root queries

<a name="v0.11.6"></a>
## [v0.11.6] - 2019-11-15

### Fix
- Fix issues with JWT auth
- Fix bug with migration filename generation
- Fix bug with migration file name

<a name="v0.11.5"></a>
## [v0.11.5] - 2019-11-10

### Fix
- Fix bug with migration template name

<a name="v0.11.4"></a>
## [v0.11.4] - 2019-11-10

### Fix
- Fix bug with creating new migrations

<a name="v0.11.3"></a>
## [v0.11.3] - 2019-11-09

### Fix
- Fix macro syntax bug in app templates

<a name="v0.11.2"></a>
## [v0.11.2] - 2019-11-07

### Fix
- Fix bugs and add new production mode

<a name="v0.11.1"></a>
## [v0.11.1] - 2019-11-05

### Add
- Add nested where clause to filter based on related tables

### Block
- Block unauthorized requests when 'anon' role is not defined

### Update
- Update docs and website with new features

<a name="v0.11"></a>
## [v0.11] - 2019-11-01

### Add
- Add config driven presets for insert, update and upsert
- Add config driven presets for insert, update and upserta
- Add RBAC option to disable functions eg. count
- Add fuzz testing to 'serv' for the GQL hash parser
- Add fuzz testing to 'jsn' and 'qcode'
- Add ability to block queries and mutations by role
- Add built in 'anon' and 'user' roles
- Add role based access control

### Allow
- Allow config files to inherit from other config files

### Change
- Change config key inherit to inherits

### Get
- Get RBAC working for queries and mutations

### Optimize
- Optimize prepared statement flow for RBAC

### Preserve
- Preserve allow.list ordering on save

### Update
- Update filters section in guide

### Pull Requests
- Merge pull request [#11](https://github.com/dosco/super-graph/issues/11) from dosco/rbac

<a name="v0.10.1"></a>
## [v0.10.1] - 2019-10-06

### Add
- Add ability to set filters per operation / action
- Add upsert mutation

### Pull Requests
- Merge pull request [#10](https://github.com/dosco/super-graph/issues/10) from FourSigma/sm-examples-folder

<a name="v0.10"></a>
## [v0.10] - 2019-10-04

### Fix
- Fix return values for bulk mutations and delete
- Fix issues with mutation SQL
- Fix broken demo app
- Fix typo in 'across'

### Remove
- Remove extra link from README

### Update
- Update docs, getting started guide and mutations

### Pull Requests
- Merge pull request [#6](https://github.com/dosco/super-graph/issues/6) from muesli/typo-fixes

<a name="v0.9"></a>
## [v0.9] - 2019-10-01

### Fix
- Fix demo rails app broken build

<a name="v0.8"></a>
## [v0.8] - 2019-09-30

### Fix
- Fix invalid import bug

### Update
- Update documentation site

<a name="v0.7"></a>
## [v0.7] - 2019-09-29

### Failure
- Failure to prepare statements should be a warning

### Fix
- Fix duplicte column bug

<a name="v0.6"></a>
## [v0.6] - 2019-09-29

### Add
- Add database setup commands
- Add binary compression back to Dockerfile
- Add initialization command to setup new apps
- Add migrate command
- Add database seeding capability
- Add session variable for user id
- Add delete mutation
- Add update mutation
- Add insert mutation with bulk insert
- Add GoTO Aug, 19 presentation
- Add support for prepared statements
- Add end-to-end benchmaking
- Add object pooling for parser expressions
- Add request / response debugging for remote joins
- Add a presentation about GraphQL
- Add validation for remote JSON
- Add tracing for API stitching
- Add REST API stitching
- Add SQL query cacheing
- Add support for GraphQL variables
- Add fuzz testing to qcode
- Add test for Rails Redis cookie store integration
- Add an install guide

### Change
- Change fuzz test name to qcode
- Change logo from PNG to SVG

### Enabke
- Enabke reload on config change

### Fix
- Fix missing config name bug
- Fix new app templates
- Fix help message for migrate
- Fix session variable bug
- Fix test failures in `psql` and `serv`
- Fix demo docker services startup order
- Fix wrong value for false token bug. Reported by [@ThisIsMissEm](https://github.com/ThisIsMissEm)
- Fix allow.list file discovery bug
- Fix bug with allow list path
- Fix wrong value for use_allow_list in dev config
- Fix startup bug in demo script
- Fix url bug in allow list
- Fix bug [#676](https://github.com/dosco/super-graph/issues/676) found by fuzzer
- Fix race-condition in remote joins
- Fix cookie passing in web ui
- Fix bug with passing cookies in web ui
- Fix null pointer with invalid argument values
- Fix infinite loop bug in lexer
- Fix null pointer issue found by fuzz test
- Fix issue with fuzzbuzz config
- Fix demo to run as memory only
- Fix auth documentation
- Fix issue with web ui sizing
- Fix issue preventing docker-compose deploy
- Fix try demo documentation

### Futher
- Futher reduce allocations across hot paths
- Futher reduce allocations on the compiler hot path
- Futher optimize json parsing and editing performance

### Highlight
- Highlight top features better on the site

### Improve
- Improve readability of json parser code
- Improve the motivation section in the readme
- Improve the demo experience

### Make
- Make remote joins use parallel http requests

### Merge
- Merge branch 'master' into optimize-psql

### New
- New low allocation fast json parsing and editing library

### Optimize
- Optimize lexer and fix bugs
- Optimize the sql generator hot path

### Reduce
- Reduce alllocations done by the stack
- Reduce steps to run the demo
- Reduce allocations and improve perf over 50%

### Remove
- Remove unused packages
- Remove the 'hello' test app folder
- Remove other allocations in psql

### Use
- Use hash's as ids for table relationships

### Watch
- Watch and reload on config changes

<a name="v0.5"></a>
## [v0.5] - 2019-04-10

### Add
- Add supprt for new Rails 5.2 aes-256-gcm cookies
- Add query support for ts_rank and ts_headline
- Add full text search support using TSV indexes
- Add missing assets folder
- Add fetch by ID feature
- Add documentation

### Cleanup
- Cleanup and redesign config files

### Fix
- Fix bug with auth config parsing

### Redesign
- Redesign config file architecture

### Reduce
- Reduce realloc of maps and slices

### Update
- Update docs with full-text search information

<a name="v0.4"></a>
## [v0.4] - 2019-04-01

<a name="v0.3"></a>
## [v0.3] - 2019-04-01

### Add
- Add SQL execution timing and tracing
- Add support for HAVING with aggregate queries
- Add aggregrate functions to GQL queries
- Add Auth0 JWT support
- Add React UI building to the docker build flow
- Add compiler profiling
- Add bechmarks for GQL to SQL compile
- Add tests for gql to sql compile

### Cleanup
- Cleanup Dockerfile

### Fix
- Fix recurring packer issue docker hub builds
- Fix issue with asset packer breaking Docker builds
- Fix missing git package in Dockerfile
- Fix docker ignore values
- Fix image build failure on docker hub
- Fix build issue in Dockerfile
- Fix bugs and document the 'where' clause
- Fix perf issue with inflections

### Optimize
- Optimize docker image

### Pack
- Pack web UI with app into a single binary

### Upgrade
- Upgrade web UI packages

<a name="0.3"></a>
## 0.3 - 2019-03-24

### First
- First commit

### Fix
- Fix license to MIT

[Unreleased]: https://github.com/dosco/super-graph/compare/v0.12.6...HEAD
[Unreleased]: https://github.com/dosco/super-graph/compare/v0.13.22...HEAD
[v0.13.22]: https://github.com/dosco/super-graph/compare/v0.13.21...v0.13.22
[v0.13.21]: https://github.com/dosco/super-graph/compare/v0.13.20...v0.13.21
[v0.13.20]: https://github.com/dosco/super-graph/compare/v0.13.19...v0.13.20
[v0.13.19]: https://github.com/dosco/super-graph/compare/v0.13.18...v0.13.19
[v0.13.18]: https://github.com/dosco/super-graph/compare/v0.13.17...v0.13.18
[v0.13.17]: https://github.com/dosco/super-graph/compare/v0.13.16...v0.13.17
[v0.13.16]: https://github.com/dosco/super-graph/compare/v0.13.15...v0.13.16
[v0.13.15]: https://github.com/dosco/super-graph/compare/v0.13.14...v0.13.15
[v0.13.14]: https://github.com/dosco/super-graph/compare/v0.13.13...v0.13.14
[v0.13.13]: https://github.com/dosco/super-graph/compare/v0.13.12...v0.13.13
[v0.13.12]: https://github.com/dosco/super-graph/compare/v0.13.11...v0.13.12
[v0.13.11]: https://github.com/dosco/super-graph/compare/v0.13.10...v0.13.11
[v0.13.10]: https://github.com/dosco/super-graph/compare/v0.13.9...v0.13.10
[v0.13.9]: https://github.com/dosco/super-graph/compare/v0.13.8...v0.13.9
[v0.13.8]: https://github.com/dosco/super-graph/compare/v0.13.7...v0.13.8
[v0.13.7]: https://github.com/dosco/super-graph/compare/v0.13.6...v0.13.7
[v0.13.6]: https://github.com/dosco/super-graph/compare/v0.13.5...v0.13.6
[v0.13.5]: https://github.com/dosco/super-graph/compare/v0.13.4...v0.13.5
[v0.13.4]: https://github.com/dosco/super-graph/compare/v0.13.3...v0.13.4
[v0.13.3]: https://github.com/dosco/super-graph/compare/v0.13.2...v0.13.3
[v0.13.2]: https://github.com/dosco/super-graph/compare/v0.13.1...v0.13.2
[v0.13.1]: https://github.com/dosco/super-graph/compare/v0.13.0...v0.13.1
[v0.13.0]: https://github.com/dosco/super-graph/compare/v0.12.49...v0.13.0
[v0.12.49]: https://github.com/dosco/super-graph/compare/v0.12.48...v0.12.49
[v0.12.48]: https://github.com/dosco/super-graph/compare/v0.12.47...v0.12.48
[v0.12.47]: https://github.com/dosco/super-graph/compare/v0.12.46...v0.12.47
[v0.12.46]: https://github.com/dosco/super-graph/compare/v0.12.45...v0.12.46
[v0.12.45]: https://github.com/dosco/super-graph/compare/v0.12.44...v0.12.45
[v0.12.44]: https://github.com/dosco/super-graph/compare/v0.12.43...v0.12.44
[v0.12.43]: https://github.com/dosco/super-graph/compare/v0.12.42...v0.12.43
[v0.12.42]: https://github.com/dosco/super-graph/compare/v0.12.41...v0.12.42
[v0.12.41]: https://github.com/dosco/super-graph/compare/v0.12.40...v0.12.41
[v0.12.40]: https://github.com/dosco/super-graph/compare/v0.12.39...v0.12.40
[v0.12.39]: https://github.com/dosco/super-graph/compare/v0.12.38...v0.12.39
[v0.12.38]: https://github.com/dosco/super-graph/compare/v0.12.37...v0.12.38
[v0.12.37]: https://github.com/dosco/super-graph/compare/v0.12.36...v0.12.37
[v0.12.36]: https://github.com/dosco/super-graph/compare/v0.12.35...v0.12.36
[v0.12.35]: https://github.com/dosco/super-graph/compare/v0.12.34...v0.12.35
[v0.12.34]: https://github.com/dosco/super-graph/compare/v0.12.33...v0.12.34
[v0.12.33]: https://github.com/dosco/super-graph/compare/v0.12.32...v0.12.33
[v0.12.32]: https://github.com/dosco/super-graph/compare/v0.12.31...v0.12.32
[v0.12.31]: https://github.com/dosco/super-graph/compare/v0.12.30...v0.12.31
[v0.12.30]: https://github.com/dosco/super-graph/compare/v0.12.29...v0.12.30
[v0.12.29]: https://github.com/dosco/super-graph/compare/v0.12.28...v0.12.29
[v0.12.28]: https://github.com/dosco/super-graph/compare/v0.12.27...v0.12.28
[v0.12.27]: https://github.com/dosco/super-graph/compare/v0.12.26...v0.12.27
[v0.12.26]: https://github.com/dosco/super-graph/compare/v0.12.25...v0.12.26
[v0.12.25]: https://github.com/dosco/super-graph/compare/v0.12.24...v0.12.25
[v0.12.24]: https://github.com/dosco/super-graph/compare/v0.12.23...v0.12.24
[v0.12.23]: https://github.com/dosco/super-graph/compare/v0.12.22...v0.12.23
[v0.12.22]: https://github.com/dosco/super-graph/compare/v0.12.21...v0.12.22
[v0.12.21]: https://github.com/dosco/super-graph/compare/v0.12.20...v0.12.21
[v0.12.20]: https://github.com/dosco/super-graph/compare/v0.12.19...v0.12.20
[v0.12.19]: https://github.com/dosco/super-graph/compare/v0.12.18...v0.12.19
[v0.12.18]: https://github.com/dosco/super-graph/compare/v0.12.17...v0.12.18
[v0.12.17]: https://github.com/dosco/super-graph/compare/v0.12.16...v0.12.17
[v0.12.16]: https://github.com/dosco/super-graph/compare/v0.12.15...v0.12.16
[v0.12.15]: https://github.com/dosco/super-graph/compare/v0.12.14...v0.12.15
[v0.12.14]: https://github.com/dosco/super-graph/compare/v0.12.13...v0.12.14
[v0.12.13]: https://github.com/dosco/super-graph/compare/v0.12.12...v0.12.13
[v0.12.12]: https://github.com/dosco/super-graph/compare/v0.12.11...v0.12.12
[v0.12.11]: https://github.com/dosco/super-graph/compare/v0.12.10...v0.12.11
[v0.12.10]: https://github.com/dosco/super-graph/compare/v0.12.9...v0.12.10
[v0.12.9]: https://github.com/dosco/super-graph/compare/v0.12.8...v0.12.9
[v0.12.8]: https://github.com/dosco/super-graph/compare/v0.12.7...v0.12.8
[v0.12.7]: https://github.com/dosco/super-graph/compare/v0.12.6...v0.12.7
[v0.12.6]: https://github.com/dosco/super-graph/compare/v0.12.5...v0.12.6
[v0.12.5]: https://github.com/dosco/super-graph/compare/v0.12.4...v0.12.5
[v0.12.4]: https://github.com/dosco/super-graph/compare/v0.12.3...v0.12.4
Makefile | 6

@@ -22,7 +22,7 @@ BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime
.PHONY: all build gen clean test run lint changlog release version help $(PLATFORMS)

test:
	@go test -v ./...
	@go test -v -short -race ./...

BIN_DIR := $(GOPATH)/bin
GORICE := $(BIN_DIR)/rice
@@ -39,13 +39,13 @@ $(WEB_BUILD_DIR):
	@exit 1

$(GITCHGLOG):
	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/git-chglog
	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog

changelog: $(GITCHGLOG)
	@git-chglog $(ARGS)

$(GOLANGCILINT):
	@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOPATH)/bin v1.21.0
	@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOPATH)/bin v1.25.1

lint: $(GOLANGCILINT)
	@golangci-lint run ./... --skip-dirs-use-default
README.md | 33

@@ -1,6 +1,6 @@
<img src="docs/guide/.vuepress/public/super-graph.png" width="250" />

### Build web products faster. Secure high performance GraphQL
### Build web products faster. Secure high-performance GraphQL

[](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
@@ -10,12 +10,12 @@

## What's Super Graph?

Designed to 100x your developer productivity. Super Graph will instantly and without you writing code provide you a high performance GraphQL API for Postgres DB. GraphQL queries are compiled into a single fast SQL query. Super Graph is a GO library and a service, use it in your own code or run it as a seperate service.
Designed to 100x your developer productivity. Super Graph will instantly, and without you writing any code, provide a high performance GraphQL API for your PostgresSQL DB. GraphQL queries are compiled into a single fast SQL query. Super Graph is a Go library and a service, use it in your own code or run it as a separate service.

## Using it as a service

```console
get get https://github.com/dosco/super-graph
go get github.com/dosco/super-graph
super-graph new <app_name>
```

@@ -35,17 +35,12 @@ import (
func main() {
	db, err := sql.Open("pgx", "postgres://postgrs:@localhost:5432/example_db")
	if err != nil {
		log.Fatalf(err)
		log.Fatal(err)
	}

	conf, err := core.ReadInConfig("./config/dev.yml")
	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatalf(err)
	}

	sg, err = core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatalf(err)
		log.Fatal(err)
	}

	query := `
@@ -58,7 +53,7 @@ func main() {

	res, err := sg.GraphQL(context.Background(), query, nil)
	if err != nil {
		log.Fatalf(err)
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
@@ -67,7 +62,7 @@ func main() {

## About Super Graph

After working on several products through my career I find that we spend way too much time on building API backends. Most APIs also require constant updating, this costs real time and money.
After working on several products through my career I found that we spend way too much time on building API backends. Most APIs also need constant updating, and this costs time and money.

It's always the same thing, figure out what the UI needs then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.

@@ -75,28 +70,27 @@ I didn't want to write this code anymore, I wanted the computer to do it. Enter

Having worked with compilers before I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL.

This compiler is what sits at the heart of Super Graph with layers of useful functionality around it like authentication, remote joins, rails integration, database migrations and everything else needed for you to build production ready apps with it.
This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, rails integration, database migrations, and everything else needed for you to build production-ready apps with it.

## Features

- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute based access control
- Opaque cursor based efficient pagination
- Full text search and aggregations
- Role and Attribute-based access control
- Opaque cursor-based efficient pagination
- Full-text search and aggregations
- JWT tokens supported (Auth0, etc)
- Join database queries with remote REST APIs
- Also works with existing Ruby-On-Rails apps
- Rails authentication supported (Redis, Memcache, Cookie)
- A simple config file
- High performance GO codebase
- High performance Go codebase
- Tiny docker image and low memory requirements
- Fuzz tested for security
- Database migrations tool
- Database seeding tool
- Works with Postgres and YugabyteDB

## Documentation

[supergraph.dev](https://supergraph.dev)
@@ -116,4 +110,3 @@ Twitter or Discord.

Copyright (c) 2019-present Vikram Rangnekar
@@ -45,6 +45,13 @@ cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: true

# Default API path prefix is /api you can change it if you like
# api_path: "/data"

# Cache-Control header can help cache queries if your CDN supports cache-control
# on POST requests (does not work with not mutations)
# cache_control: "public, max-age=300, s-maxage=600"

# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
core/api.go | 52

@@ -16,17 +16,12 @@
	func main() {
		db, err := sql.Open("pgx", "postgres://postgrs:@localhost:5432/example_db")
		if err != nil {
			log.Fatalf(err)
			log.Fatal(err)
		}

		conf, err := core.ReadInConfig("./config/dev.yml")
		sg, err := core.NewSuperGraph(nil, db)
		if err != nil {
			log.Fatalf(err)
		}

		sg, err = core.NewSuperGraph(conf, db)
		if err != nil {
			log.Fatalf(err)
			log.Fatal(err)
		}

		query := `
@@ -39,7 +34,7 @@

		res, err := sg.GraphQL(context.Background(), query, nil)
		if err != nil {
			log.Fatalf(err)
			log.Fatal(err)
		}

		fmt.Println(string(res.Data))
@@ -82,6 +77,7 @@ type SuperGraph struct {
	conf      *Config
	db        *sql.DB
	log       *_log.Logger
	dbinfo    *psql.DBInfo
	schema    *psql.DBSchema
	allowList *allow.List
	encKey    [32]byte
@@ -93,15 +89,26 @@ type SuperGraph struct {
	anonExists bool
	qc         *qcode.Compiler
	pc         *psql.Compiler
	ge         *graphql.Engine
}

// NewSuperGraph creates the SuperGraph struct, this involves querying the database to learn its
// schemas and relationships
func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
	return newSuperGraph(conf, db, nil)
}

// newSuperGraph helps with writing tests and benchmarks
func newSuperGraph(conf *Config, db *sql.DB, dbinfo *psql.DBInfo) (*SuperGraph, error) {
	if conf == nil {
		conf = &Config{}
	}

	sg := &SuperGraph{
		conf: conf,
		db:   db,
		log:  _log.New(os.Stdout, "", 0),
		conf:   conf,
		db:     db,
		dbinfo: dbinfo,
		log:    _log.New(os.Stdout, "", 0),
	}

	if err := sg.initConfig(); err != nil {
@@ -124,6 +131,10 @@ func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
		return nil, err
	}

	if err := sg.initGraphQLEgine(); err != nil {
		return nil, err
	}

	if len(conf.SecretKey) != 0 {
		sk := sha256.Sum256([]byte(conf.SecretKey))
		conf.SecretKey = ""
@@ -163,14 +174,9 @@ func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMess
	// use the chirino/graphql library for introspection queries
	// disabled when allow list is enforced
	if !sg.conf.UseAllowList && res.name == "IntrospectionQuery" {
		engine, err := sg.createGraphQLEgine()
		if err != nil {
			res.Error = err.Error()
			return &res, err
		}

		r := engine.ExecuteOne(&graphql.EngineRequest{Query: query})
		r := sg.ge.ServeGraphQL(&graphql.Request{Query: query})
		res.Data = r.Data

		if r.Error() != nil {
			res.Error = r.Error().Error()
		}
@@ -199,10 +205,8 @@ func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMess
	return &ct.res, nil
}

// GraphQLSchema function return the GraphQL schema for the underlying database connected
// to this instance of Super Graph
func (sg *SuperGraph) GraphQLSchema() (string, error) {
	engine, err := sg.createGraphQLEgine()
	if err != nil {
		return "", err
	}
	return engine.Schema.String(), nil
	return sg.ge.Schema.String(), nil
}
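With this change a single `graphql.Engine` is built once by `initGraphQLEgine` and cached on the `ge` field, so introspection and `GraphQLSchema` no longer construct a throwaway engine per call. A minimal sketch of using the exported schema accessor from a consumer (the helper name is illustrative; `sg` is a `*core.SuperGraph` built as in the example above):

```go
// printSchema dumps the GraphQL SDL that Super Graph derives from the
// connected database, served from the engine cached at startup.
func printSchema(sg *core.SuperGraph) error {
	sdl, err := sg.GraphQLSchema() // serializes the cached engine's schema
	if err != nil {
		return err
	}
	fmt.Println(sdl)
	return nil
}
```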
core/api_test.go | 62 (new file)

@@ -0,0 +1,62 @@
package core

import (
	"context"
	"fmt"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/dosco/super-graph/core/internal/psql"
)

func BenchmarkGraphQL(b *testing.B) {
	ct := context.WithValue(context.Background(), UserIDKey, "1")

	db, _, err := sqlmock.New()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	// mock.ExpectQuery(`^SELECT jsonb_build_object`).WithArgs()
	c := &Config{DefaultBlock: true}
	sg, err := newSuperGraph(c, db, psql.GetTestDBInfo())
	if err != nil {
		b.Fatal(err)
	}

	query := `
	query {
		products {
			id
			name
			user {
				full_name
				phone
				email
			}
			customers {
				id
				email
			}
		}
		users {
			id
			name
		}
	}`

	b.ResetTimer()
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = sg.GraphQL(ct, query, nil)
		}
	})

	fmt.Println(err)

	//fmt.Println(mock.ExpectationsWereMet())

}
core/args.go | 33

@@ -9,6 +9,8 @@ import (
	"github.com/dosco/super-graph/jsn"
)

// argMap function is used to string replace variables with values by
// the fasttemplate code
func (c *scontext) argMap() func(w io.Writer, tag string) (int, error) {
	return func(w io.Writer, tag string) (int, error) {
		switch tag {
@@ -56,10 +58,13 @@ func (c *scontext) argMap() func(w io.Writer, tag string) (int, error) {
			return w.Write(v1)
		}

		return w.Write(escQuote(fields[0].Value))
		return w.Write(escSQuote(fields[0].Value))
	}
}

// argList function is used to create a list of arguments to pass
// to a prepared statement. FYI no escaping of single quotes is
// needed here
func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
	vars := make([]interface{}, len(args))

@@ -113,7 +118,7 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
	if v, ok := fields[string(av)]; ok {
		switch v[0] {
		case '[', '{':
			vars[i] = escQuote(v)
			vars[i] = v
		default:
			var val interface{}
			if err := json.Unmarshal(v, &val); err != nil {
@@ -132,27 +137,25 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
	return vars, nil
}

func escQuote(b []byte) []byte {
	f := false
	for i := range b {
		if b[i] == '\'' {
			f = true
			break
		}
	}
	if !f {
		return b
	}

	buf := &bytes.Buffer{}
// escSQuote escapes single quotes in b by doubling them, only allocating
// a buffer when a quote is actually found
func escSQuote(b []byte) []byte {
	var buf *bytes.Buffer
	s := 0
	for i := range b {
		if b[i] == '\'' {
			if buf == nil {
				buf = &bytes.Buffer{}
			}
			buf.Write(b[s:i])
			buf.WriteString(`''`)
			s = i + 1
		}
	}

	if buf == nil {
		return b
	}

	l := len(b)
	if s < (l - 1) {
		buf.Write(b[s:l])
core/args_test.go | 13 (new file)

@@ -0,0 +1,13 @@
package core

import "testing"

func TestEscQuote(t *testing.T) {
	val := "That's the worst, don''t be calling me's again"
	exp := "That''s the worst, don''''t be calling me''s again"
	ret := escSQuote([]byte(val))

	if exp != string(ret) {
		t.Errorf("escSQuote failed: %s", string(ret))
	}
}
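For context, doubling a single quote (`''`) is the SQL-standard escape for a quote inside a single-quoted string literal, which is why `escSQuote` rewrites every `'` before the value is spliced into query text by the fasttemplate path. A small sketch of that property as a test next to the one above (the names and query text are illustrative, not from the diff):

```go
func TestEscSQuoteLiteral(t *testing.T) {
	// "O'Reilly" becomes "O''Reilly", safe inside a single-quoted literal.
	got := string(escSQuote([]byte("O'Reilly")))
	if got != "O''Reilly" {
		t.Fatalf("escSQuote returned %q", got)
	}
}
```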
@@ -167,16 +167,16 @@ func (sg *SuperGraph) renderUserQuery(stmts []stmt) (string, error) {
	return w.String(), nil
}

func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
	for _, id := range qc.Roots {
		t, err := sg.schema.GetTable(qc.Selects[id].Name)
		if err != nil {
			return false
		}
// func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
// 	for _, id := range qc.Roots {
// 		t, err := sg.schema.GetTable(qc.Selects[id].Name)
// 		if err != nil {
// 			return false
// 		}

		if r := role.GetTable(t.Name); r == nil {
			return false
		}
	}
	return true
}
// 		if r := role.GetTable(t.Name); r == nil {
// 			return false
// 		}
// 	}
// 	return true
// }
@@ -3,6 +3,7 @@ package core
import (
	"fmt"
	"path"
	"path/filepath"
	"strings"

	"github.com/spf13/viper"
@@ -10,16 +11,56 @@

// Core struct contains core specific config value
type Config struct {
	SecretKey     string            `mapstructure:"secret_key"`
	UseAllowList  bool              `mapstructure:"use_allow_list"`
	AllowListFile string            `mapstructure:"allow_list_file"`
	SetUserID     bool              `mapstructure:"set_user_id"`
	Vars          map[string]string `mapstructure:"variables"`
	Blocklist     []string
	Tables        []Table
	RolesQuery    string `mapstructure:"roles_query"`
	Roles         []Role
	Inflections   map[string]string
	// SecretKey is used to encrypt opaque values such as
	// the cursor. Auto-generated if not set
	SecretKey string `mapstructure:"secret_key"`

	// UseAllowList (aka production mode) when set to true ensures
	// only queries lists in the allow.list file can be used. All
	// queries are pre-prepared so no compiling happens and things are
	// very fast.
	UseAllowList bool `mapstructure:"use_allow_list"`

	// AllowListFile if the path to allow list file if not set the
	// path is assumed to tbe the same as the config path (allow.list)
	AllowListFile string `mapstructure:"allow_list_file"`

	// SetUserID forces the database session variable `user.id` to
	// be set to the user id. This variables can be used by triggers
	// or other database functions
	SetUserID bool `mapstructure:"set_user_id"`

	// DefaultBlock ensures only tables configured under the `anon` role
	// config can be queries if the `anon` role. For example if the table
	// `users` is not listed under the anon role then it will be filtered
	// out of any unauthenticated queries that mention it.
	DefaultBlock bool `mapstructure:"default_block"`

	// Vars is a map of hardcoded variables that can be leveraged in your
	// queries (eg variable admin_id will be $admin_id in the query)
	Vars map[string]string `mapstructure:"variables"`

	// Blocklist is a list of tables and columns that should be filtered
	// out from any and all queries
	Blocklist []string

	// Tables contains all table specific configuration such as aliased tables
	// creating relationships between tables, etc
	Tables []Table

	// RolesQuery if set enabled attributed based access control. This query
	// is use to fetch the user attributes that then dynamically define the users
	// role.
	RolesQuery string `mapstructure:"roles_query"`

	// Roles contains all the configuration for all the roles you want to support
	// `user` and `anon` are two default roles. User role is for when a user ID is
	// available and Anon when it's not.
	Roles []Role

	// Inflections is to add additionally singular to plural mappings
	// to the engine (eg. sheep: sheep)
	Inflections map[string]string `mapstructure:"inflections"`
}

// Table struct defines a database table
@@ -155,9 +196,13 @@ func newViper(configPath, configFile string) *viper.Viper {
	vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	vi.AutomaticEnv()

	vi.SetConfigName(configFile)
	vi.AddConfigPath(configPath)
	vi.AddConfigPath("./config")
	if len(filepath.Ext(configFile)) != 0 {
		vi.SetConfigFile(configFile)
	} else {
		vi.SetConfigName(configFile)
		vi.AddConfigPath(configPath)
		vi.AddConfigPath("./config")
	}

	return vi
}
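A minimal sketch of configuring Super Graph in code using the fields documented above. Field values, the driver import, and the connection string are illustrative; `core.ReadInConfig` (shown in the README diff) remains the file-based alternative:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib" // assumed pgx stdlib driver for sql.Open("pgx", ...)
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	conf := &core.Config{
		UseAllowList: true, // production mode: only queries in allow.list run
		SetUserID:    true, // set the `user.id` session variable for triggers
		DefaultBlock: true, // anon role sees only explicitly configured tables
		Vars:         map[string]string{"admin_id": "5"}, // usable as $admin_id
		Inflections:  map[string]string{"sheep": "sheep"},
	}

	sg, err := core.NewSuperGraph(conf, db)
	if err != nil {
		log.Fatal(err)
	}
	_ = sg
}
```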
core/core.go | 45

@@ -14,6 +14,11 @@ import (
	"github.com/valyala/fasttemplate"
)

const (
	OpQuery int = iota
	OpMutation
)

type extensions struct {
	Tracing *trace `json:"tracing,omitempty"`
}
@@ -50,26 +55,33 @@ type scontext struct {
}

func (sg *SuperGraph) initCompilers() error {
	di, err := psql.GetDBInfo(sg.db)
	if err != nil {
	var err error

	// If sg.di is not null then it's probably set
	// for tests
	if sg.dbinfo == nil {
		sg.dbinfo, err = psql.GetDBInfo(sg.db)
		if err != nil {
			return err
		}
	}

	if err = addTables(sg.conf, sg.dbinfo); err != nil {
		return err
	}

	if err = addTables(sg.conf, di); err != nil {
	if err = addForeignKeys(sg.conf, sg.dbinfo); err != nil {
		return err
	}

	if err = addForeignKeys(sg.conf, di); err != nil {
		return err
	}

	sg.schema, err = psql.NewDBSchema(di, getDBTableAliases(sg.conf))
	sg.schema, err = psql.NewDBSchema(sg.dbinfo, getDBTableAliases(sg.conf))
	if err != nil {
		return err
	}

	sg.qc, err = qcode.NewCompiler(qcode.Config{
		Blocklist: sg.conf.Blocklist,
		DefaultBlock: sg.conf.DefaultBlock,
		Blocklist:    sg.conf.Blocklist,
	})
	if err != nil {
		return err
@@ -322,7 +334,20 @@ func (c *scontext) executeRoleQuery(tx *sql.Tx) (string, error) {
	return role, nil
}

func (r *Result) Operation() string {
func (r *Result) Operation() int {
	switch r.op {
	case qcode.QTQuery:
		return OpQuery

	case qcode.QTMutation, qcode.QTInsert, qcode.QTUpdate, qcode.QTUpsert, qcode.QTDelete:
		return OpMutation

	default:
		return -1
	}
}

func (r *Result) OperationName() string {
	return r.op.String()
}
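`Operation()` now returns one of the integer constants declared above instead of a string, so callers can switch on the result; `OperationName()` keeps the string form. A sketch from a consumer's point of view (the helper name is illustrative; `res` is the `*core.Result` returned by `sg.GraphQL` as in the example at the top of core/api.go):

```go
// opKind reports whether a result came from a read or a write,
// using the new integer Operation API.
func opKind(res *core.Result) string {
	switch res.Operation() {
	case core.OpQuery:
		return "query: " + res.OperationName()
	case core.OpMutation:
		return "mutation: " + res.OperationName()
	default:
		return "unknown operation"
	}
}
```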
@@ -1,483 +0,0 @@
package core

import (
	"errors"
	"regexp"
	"strings"

	"github.com/chirino/graphql"
	"github.com/chirino/graphql/resolvers"
	"github.com/chirino/graphql/schema"
	"github.com/dosco/super-graph/core/internal/psql"
)

var typeMap map[string]string = map[string]string{
	"smallint":         "Int",
	"integer":          "Int",
	"bigint":           "Int",
	"smallserial":      "Int",
	"serial":           "Int",
	"bigserial":        "Int",
	"decimal":          "Float",
	"numeric":          "Float",
	"real":             "Float",
	"double precision": "Float",
	"money":            "Float",
	"boolean":          "Boolean",
}

func (sg *SuperGraph) createGraphQLEgine() (*graphql.Engine, error) {
	engine := graphql.New()
	engineSchema := engine.Schema
	dbSchema := sg.schema

	engineSchema.Parse(`
enum OrderDirection {
	asc
	desc
}
`)

	gqltype := func(col psql.DBColumn) schema.Type {
		typeName := typeMap[strings.ToLower(col.Type)]
		if typeName == "" {
			typeName = "String"
		}
		var t schema.Type = &schema.TypeName{Ident: schema.Ident{Text: typeName}}
		if col.NotNull {
			t = &schema.NonNull{OfType: t}
		}
		return t
	}

	query := &schema.Object{
		Name:   "Query",
		Fields: schema.FieldList{},
	}
	mutation := &schema.Object{
		Name:   "Mutation",
		Fields: schema.FieldList{},
	}
	engineSchema.Types[query.Name] = query
	engineSchema.Types[mutation.Name] = mutation
	engineSchema.EntryPoints[schema.Query] = query
	engineSchema.EntryPoints[schema.Mutation] = mutation

	validGraphQLIdentifierRegex := regexp.MustCompile(`^[A-Za-z_][A-Za-z_0-9]*$`)

	scalarExpressionTypesNeeded := map[string]bool{}
	tableNames := dbSchema.GetTableNames()
	for _, table := range tableNames {

		ti, err := dbSchema.GetTable(table)
		if err != nil {
			return nil, err
		}

		if !ti.IsSingular {
			continue
		}

		singularName := ti.Singular
		if !validGraphQLIdentifierRegex.MatchString(singularName) {
			return nil, errors.New("table name is not a valid GraphQL identifier: " + singularName)
		}
		pluralName := ti.Plural
		if !validGraphQLIdentifierRegex.MatchString(pluralName) {
			return nil, errors.New("table name is not a valid GraphQL identifier: " + pluralName)
		}

		outputType := &schema.Object{
			Name:   singularName + "Output",
			Fields: schema.FieldList{},
		}
		engineSchema.Types[outputType.Name] = outputType

		inputType := &schema.InputObject{
			Name:   singularName + "Input",
			Fields: schema.InputValueList{},
		}
		engineSchema.Types[inputType.Name] = inputType

		orderByType := &schema.InputObject{
			Name:   singularName + "OrderBy",
			Fields: schema.InputValueList{},
		}
		engineSchema.Types[orderByType.Name] = orderByType

		expressionTypeName := singularName + "Expression"
		expressionType := &schema.InputObject{
			Name: expressionTypeName,
			Fields: schema.InputValueList{
				&schema.InputValue{
					Name: schema.Ident{Text: "and"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: expressionTypeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "or"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: expressionTypeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "not"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: expressionTypeName}}},
				},
			},
		}
		engineSchema.Types[expressionType.Name] = expressionType

		for _, col := range ti.Columns {
			colName := col.Name
			if !validGraphQLIdentifierRegex.MatchString(colName) {
				return nil, errors.New("column name is not a valid GraphQL identifier: " + colName)
			}

			colType := gqltype(col)
			nullableColType := ""
			if x, ok := colType.(*schema.NonNull); ok {
				nullableColType = x.OfType.(*schema.TypeName).Ident.Text
			} else {
				nullableColType = colType.(*schema.TypeName).Ident.Text
			}

			outputType.Fields = append(outputType.Fields, &schema.Field{
				Name: colName,
				Type: colType,
			})

			// If it's a numeric type...
			if nullableColType == "Float" || nullableColType == "Int" {
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "avg_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "count_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "max_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "min_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_pop_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_samp_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "variance_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "var_pop_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "var_samp_" + colName,
					Type: colType,
				})
			}

			inputType.Fields = append(inputType.Fields, &schema.InputValue{
				Name: schema.Ident{Text: colName},
				Type: colType,
			})
			orderByType.Fields = append(orderByType.Fields, &schema.InputValue{
				Name: schema.Ident{Text: colName},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "OrderDirection"}}},
			})

			scalarExpressionTypesNeeded[nullableColType] = true

			expressionType.Fields = append(expressionType.Fields, &schema.InputValue{
				Name: schema.Ident{Text: colName},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: nullableColType + "Expression"}}},
			})
		}

		outputTypeName := &schema.TypeName{Ident: schema.Ident{Text: outputType.Name}}
		inputTypeName := &schema.TypeName{Ident: schema.Ident{Text: inputType.Name}}
		pluralOutputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: outputType.Name}}}}}
		pluralInputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: inputType.Name}}}}}

		args := schema.InputValueList{
			&schema.InputValue{
				Desc: &schema.Description{Text: "To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."},
				Name: schema.Ident{Text: "order_by"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: orderByType.Name}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "where"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: expressionType.Name}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "limit"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "Int"}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "offset"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "Int"}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "first"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "Int"}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "last"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "Int"}}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "before"},
				Type: &schema.TypeName{Ident: schema.Ident{Text: "String"}},
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "after"},
				Type: &schema.TypeName{Ident: schema.Ident{Text: "String"}},
			},
		}
		if ti.PrimaryCol != nil {
			t := gqltype(*ti.PrimaryCol)
			if _, ok := t.(*schema.NonNull); !ok {
				t = &schema.NonNull{OfType: t}
			}
			args = append(args, &schema.InputValue{
				Desc: &schema.Description{Text: "Finds the record by the primary key"},
				Name: schema.Ident{Text: "id"},
				Type: t,
			})
		}

		if ti.TSVCol != nil {
			args = append(args, &schema.InputValue{
				Desc: &schema.Description{Text: "Performs full text search using a TSV index"},
				Name: schema.Ident{Text: "search"},
				Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
			})
		}

		query.Fields = append(query.Fields, &schema.Field{
			Desc: &schema.Description{Text: ""},
			Name: singularName,
			Type: outputTypeName,
			Args: args,
		})
		query.Fields = append(query.Fields, &schema.Field{
			Desc: &schema.Description{Text: ""},
			Name: pluralName,
			Type: pluralOutputTypeName,
			Args: args,
		})

		mutationArgs := append(args, schema.InputValueList{
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "insert"},
				Type: inputTypeName,
			},
			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "update"},
				Type: inputTypeName,
			},

			&schema.InputValue{
				Desc: &schema.Description{Text: ""},
				Name: schema.Ident{Text: "upsert"},
				Type: inputTypeName,
			},
		}...)

		mutation.Fields = append(mutation.Fields, &schema.Field{
			Name: singularName,
			Args: mutationArgs,
			Type: outputType,
		})
		mutation.Fields = append(mutation.Fields, &schema.Field{
			Name: pluralName,
			Args: append(mutationArgs, schema.InputValueList{
				&schema.InputValue{
					Desc: &schema.Description{Text: ""},
					Name: schema.Ident{Text: "inserts"},
					Type: pluralInputTypeName,
				},
				&schema.InputValue{
					Desc: &schema.Description{Text: ""},
					Name: schema.Ident{Text: "updates"},
					Type: pluralInputTypeName,
				},
				&schema.InputValue{
					Desc: &schema.Description{Text: ""},
					Name: schema.Ident{Text: "upserts"},
					Type: pluralInputTypeName,
				},
			}...),
			Type: outputType,
		})
	}

	for typeName, _ := range scalarExpressionTypesNeeded {
		expressionType := &schema.InputObject{
			Name: typeName + "Expression",
			Fields: schema.InputValueList{
				&schema.InputValue{
					Name: schema.Ident{Text: "eq"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "equals"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "neq"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "not_equals"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "gt"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "greater_than"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "lt"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "lesser_than"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "gte"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "greater_or_equals"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "lte"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "lesser_or_equals"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "in"},
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "nin"},
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "not_in"},
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
				},

				&schema.InputValue{
					Name: schema.Ident{Text: "like"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "nlike"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "not_like"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "ilike"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "nilike"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "not_ilike"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "similar"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
				},
				&schema.InputValue{
					Name: schema.Ident{Text: "nsimilar"},
					Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "not_similar"},
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "has_key"},
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "has_key_any"},
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "has_key_all"},
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "contains"},
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: typeName}}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "contained_in"},
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "String"}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: schema.Ident{Text: "is_null"},
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Ident: schema.Ident{Text: "Boolean"}}},
|
||||
},
|
||||
},
|
||||
}
|
||||
engineSchema.Types[expressionType.Name] = expressionType
|
||||
}
|
||||
|
||||
err := engineSchema.ResolveTypes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
engine.Resolver = resolvers.Func(func(request *resolvers.ResolveRequest, next resolvers.Resolution) resolvers.Resolution {
|
||||
resolver := resolvers.MetadataResolver.Resolve(request, next)
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
resolver = resolvers.MethodResolver.Resolve(request, next) // needed by the MetadataResolver
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
return engine, nil
|
||||
}
|
10 core/init.go
@ -70,6 +70,16 @@ func (sg *SuperGraph) initConfig() error {
|
||||
sg.roles["user"] = &ur
|
||||
}
|
||||
|
||||
// If the anon role is not defined and DefaultBlock is not set then create it
|
||||
if _, ok := sg.roles["anon"]; !ok && !c.DefaultBlock {
|
||||
ur := Role{
|
||||
Name: "anon",
|
||||
tm: make(map[string]*RoleTable),
|
||||
}
|
||||
c.Roles = append(c.Roles, ur)
|
||||
sg.roles["anon"] = &ur
|
||||
}
|
||||
|
||||
// Roles: validate and sanitize
|
||||
c.RolesQuery = sanitizeVars(c.RolesQuery)
|
||||
|
||||
|
@ -9,6 +9,8 @@ import (
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -230,6 +232,8 @@ func (al *List) Load() ([]Item, error) {
|
||||
}
|
||||
|
||||
func (al *List) save(item Item) error {
|
||||
var buf bytes.Buffer
|
||||
|
||||
item.Name = QueryName(item.Query)
|
||||
item.key = strings.ToLower(item.Name)
|
||||
|
||||
@ -298,9 +302,16 @@ func (al *List) save(item Item) error {
|
||||
}
|
||||
|
||||
if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
|
||||
vj, err := json.MarshalIndent(v.Vars, "", " ")
|
||||
buf.Reset()
|
||||
|
||||
if err := jsn.Clear(&buf, v.Vars); err != nil {
|
||||
return fmt.Errorf("failed to clean vars: %w", err)
|
||||
}
|
||||
vj := json.RawMessage(buf.Bytes())
|
||||
|
||||
vj, err = json.MarshalIndent(vj, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal vars: %v", err)
|
||||
return fmt.Errorf("failed to marshal vars: %w", err)
|
||||
}
|
||||
|
||||
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
|
||||
|
@ -40,8 +40,12 @@ func TestCockroachDB(t *testing.T) {
|
||||
stopDatabase := func() {
|
||||
fmt.Println("stopping temporary cockroach db")
|
||||
if atomic.CompareAndSwapInt32(&stopped, 0, 1) {
|
||||
cmd.Process.Kill()
|
||||
cmd.Process.Wait()
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err := cmd.Process.Wait(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func DropSchema(t *testing.T, db *sql.DB) {
|
||||
}
|
||||
|
||||
func TestSuperGraph(t *testing.T, db *sql.DB, before func(t *testing.T)) {
|
||||
config := core.Config{}
|
||||
config := core.Config{DefaultBlock: true}
|
||||
config.UseAllowList = false
|
||||
config.AllowListFile = "./allow.list"
|
||||
config.RolesQuery = `SELECT * FROM users WHERE id = $user_id`
|
||||
|
@ -167,7 +167,7 @@ func (c *compilerContext) renderColumnTypename(sel *qcode.Select, ti *DBTableInf
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderColumnFunction(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
|
||||
pl := funcPrefixLen(col.Name)
|
||||
pl := funcPrefixLen(c.schema.fm, col.Name)
|
||||
// if pl == 0 {
|
||||
// //fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
|
||||
// io.WriteString(c.w, `'`)
|
||||
|
@ -10,7 +10,7 @@ import (
|
||||
var (
|
||||
qcompileTest, _ = qcode.NewCompiler(qcode.Config{})
|
||||
|
||||
schema = getTestSchema()
|
||||
schema = GetTestSchema()
|
||||
|
||||
vars = NewVariables(map[string]string{
|
||||
"admin_account_id": "5",
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
)
|
||||
|
||||
@ -19,7 +20,7 @@ const (
|
||||
|
||||
var (
|
||||
qcompile *qcode.Compiler
|
||||
pcompile *Compiler
|
||||
pcompile *psql.Compiler
|
||||
expected map[string][]string
|
||||
)
|
||||
|
||||
@ -133,13 +134,16 @@ func TestMain(m *testing.M) {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
schema := getTestSchema()
|
||||
schema, err := psql.GetTestSchema()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
vars := NewVariables(map[string]string{
|
||||
vars := psql.NewVariables(map[string]string{
|
||||
"admin_account_id": "5",
|
||||
})
|
||||
|
||||
pcompile = NewCompiler(Config{
|
||||
pcompile = psql.NewCompiler(psql.Config{
|
||||
Schema: schema,
|
||||
Vars: vars,
|
||||
})
|
||||
@ -173,7 +177,7 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
|
||||
func compileGQLToPSQL(t *testing.T, gql string, vars psql.Variables, role string) {
|
||||
generateTestFile := false
|
||||
|
||||
if generateTestFile {
|
||||
|
@ -543,7 +543,7 @@ func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo, skip
|
||||
var cn string
|
||||
|
||||
for _, col := range sel.Cols {
|
||||
if n := funcPrefixLen(col.Name); n != 0 {
|
||||
if n := funcPrefixLen(c.schema.fm, col.Name); n != 0 {
|
||||
if !sel.Functions {
|
||||
continue
|
||||
}
|
||||
@ -921,8 +921,6 @@ func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested b
|
||||
st.Push('(')
|
||||
|
||||
case qcode.OpNot:
|
||||
//fmt.Printf("1> %s %d %s %s\n", val.Op, len(val.Children), val.Children[0].Op, val.Children[1].Op)
|
||||
|
||||
st.Push(val.Children[0])
|
||||
st.Push(qcode.OpNot)
|
||||
|
||||
@ -1193,7 +1191,7 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *
|
||||
io.WriteString(c.w, col.Type)
|
||||
}
|
||||
|
||||
func funcPrefixLen(fn string) int {
|
||||
func funcPrefixLen(fm map[string]*DBFunction, fn string) int {
|
||||
switch {
|
||||
case strings.HasPrefix(fn, "avg_"):
|
||||
return 4
|
||||
@ -1218,6 +1216,14 @@ func funcPrefixLen(fn string) int {
|
||||
case strings.HasPrefix(fn, "var_samp_"):
|
||||
return 9
|
||||
}
|
||||
fnLen := len(fn)
|
||||
|
||||
for k := range fm {
|
||||
kLen := len(k)
|
||||
if kLen < fnLen && k[0] == fn[0] && strings.HasPrefix(fn, k) && fn[kLen] == '_' {
|
||||
return kLen + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
@ -11,6 +11,7 @@ type DBSchema struct {
|
||||
ver int
|
||||
t map[string]*DBTableInfo
|
||||
rm map[string]map[string]*DBRel
|
||||
fm map[string]*DBFunction
|
||||
}
|
||||
|
||||
type DBTableInfo struct {
|
||||
@ -56,8 +57,10 @@ type DBRel struct {
|
||||
|
||||
func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
|
||||
schema := &DBSchema{
|
||||
t: make(map[string]*DBTableInfo),
|
||||
rm: make(map[string]map[string]*DBRel),
|
||||
ver: info.Version,
|
||||
t: make(map[string]*DBTableInfo),
|
||||
rm: make(map[string]map[string]*DBRel),
|
||||
fm: make(map[string]*DBFunction, len(info.Functions)),
|
||||
}
|
||||
|
||||
for i, t := range info.Tables {
|
||||
@ -81,6 +84,12 @@ func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
|
||||
}
|
||||
}
|
||||
|
||||
for k, f := range info.Functions {
|
||||
if len(f.Params) == 1 {
|
||||
schema.fm[strings.ToLower(f.Name)] = &info.Functions[k]
|
||||
}
|
||||
}
|
||||
|
||||
return schema, nil
|
||||
}
|
||||
|
||||
@ -373,7 +382,7 @@ func (s *DBSchema) updateSchemaOTMT(
|
||||
|
||||
func (s *DBSchema) GetTableNames() []string {
|
||||
var names []string
|
||||
for name, _ := range s.t {
|
||||
for name := range s.t {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
@ -439,3 +448,11 @@ func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
|
||||
}
|
||||
return rel, nil
|
||||
}
|
||||
|
||||
func (s *DBSchema) GetFunctions() []*DBFunction {
|
||||
var funcs []*DBFunction
|
||||
for _, f := range s.fm {
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
return funcs
|
||||
}
|
||||
|
@ -10,10 +10,11 @@ import (
|
||||
)
|
||||
|
||||
type DBInfo struct {
|
||||
Version int
|
||||
Tables []DBTable
|
||||
Columns [][]DBColumn
|
||||
colmap map[string]map[string]*DBColumn
|
||||
Version int
|
||||
Tables []DBTable
|
||||
Columns [][]DBColumn
|
||||
Functions []DBFunction
|
||||
colMap map[string]map[string]*DBColumn
|
||||
}
|
||||
|
||||
func GetDBInfo(db *sql.DB) (*DBInfo, error) {
|
||||
@ -35,41 +36,56 @@ func GetDBInfo(db *sql.DB) (*DBInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
di.colmap = make(map[string]map[string]*DBColumn, len(di.Tables))
|
||||
|
||||
for i, t := range di.Tables {
|
||||
for _, t := range di.Tables {
|
||||
cols, err := GetColumns(db, "public", t.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
di.Columns = append(di.Columns, cols)
|
||||
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
}
|
||||
|
||||
for n, c := range di.Columns[i] {
|
||||
di.colmap[t.Key][c.Key] = &di.Columns[i][n]
|
||||
}
|
||||
di.colMap = newColMap(di.Tables, di.Columns)
|
||||
|
||||
di.Functions, err = GetFunctions(db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return di, nil
|
||||
}
|
||||
|
||||
func newColMap(tables []DBTable, columns [][]DBColumn) map[string]map[string]*DBColumn {
|
||||
cm := make(map[string]map[string]*DBColumn, len(tables))
|
||||
|
||||
for i, t := range tables {
|
||||
cols := columns[i]
|
||||
cm[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
|
||||
for n, c := range cols {
|
||||
cm[t.Key][c.Key] = &columns[i][n]
|
||||
}
|
||||
}
|
||||
|
||||
return cm
|
||||
}
|
||||
|
||||
func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
|
||||
t.ID = di.Tables[len(di.Tables)-1].ID
|
||||
|
||||
di.Tables = append(di.Tables, t)
|
||||
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
di.colMap[t.Key] = make(map[string]*DBColumn, len(cols))
|
||||
|
||||
for i := range cols {
|
||||
cols[i].ID = int16(i)
|
||||
c := &cols[i]
|
||||
di.colmap[t.Key][c.Key] = c
|
||||
di.colMap[t.Key][c.Key] = c
|
||||
}
|
||||
di.Columns = append(di.Columns, cols)
|
||||
}
|
||||
|
||||
func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
|
||||
v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
|
||||
v, ok := di.colMap[strings.ToLower(table)][strings.ToLower(column)]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
@ -237,6 +253,71 @@ ORDER BY id;`
|
||||
return cols, nil
|
||||
}
|
||||
|
||||
type DBFunction struct {
|
||||
Name string
|
||||
Params []DBFuncParam
|
||||
}
|
||||
|
||||
type DBFuncParam struct {
|
||||
ID int
|
||||
Name sql.NullString
|
||||
Type string
|
||||
}
|
||||
|
||||
func GetFunctions(db *sql.DB) ([]DBFunction, error) {
|
||||
sqlStmt := `
|
||||
SELECT
|
||||
routines.routine_name,
|
||||
parameters.specific_name,
|
||||
parameters.data_type,
|
||||
parameters.parameter_name,
|
||||
parameters.ordinal_position
|
||||
FROM
|
||||
information_schema.routines
|
||||
RIGHT JOIN
|
||||
information_schema.parameters
|
||||
ON (routines.specific_name = parameters.specific_name and parameters.ordinal_position IS NOT NULL)
|
||||
WHERE
|
||||
routines.specific_schema = 'public'
|
||||
ORDER BY
|
||||
routines.routine_name, parameters.ordinal_position;`
|
||||
|
||||
rows, err := db.Query(sqlStmt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error fetching functions: %s", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var funcs []DBFunction
|
||||
fm := make(map[string]int)
|
||||
|
||||
parameterIndex := 1
|
||||
for rows.Next() {
|
||||
var fn, fid string
|
||||
fp := DBFuncParam{}
|
||||
|
||||
err = rows.Scan(&fn, &fid, &fp.Type, &fp.Name, &fp.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !fp.Name.Valid {
|
||||
fp.Name.String = string(parameterIndex)
|
||||
fp.Name.Valid = true
|
||||
}
|
||||
|
||||
if i, ok := fm[fid]; ok {
|
||||
funcs[i].Params = append(funcs[i].Params, fp)
|
||||
} else {
|
||||
funcs = append(funcs, DBFunction{Name: fn, Params: []DBFuncParam{fp}})
|
||||
fm[fid] = len(funcs) - 1
|
||||
}
|
||||
parameterIndex++
|
||||
}
|
||||
|
||||
return funcs, nil
|
||||
}
|
||||
|
||||
// func GetValType(type string) qcode.ValType {
|
||||
// switch {
|
||||
// case "bigint", "integer", "smallint", "numeric", "bigserial":
|
||||
|
@ -1,11 +1,10 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getTestSchema() *DBSchema {
|
||||
func GetTestDBInfo() *DBInfo {
|
||||
tables := []DBTable{
|
||||
DBTable{Name: "customers", Type: "table"},
|
||||
DBTable{Name: "users", Type: "table"},
|
||||
@ -74,36 +73,19 @@ func getTestSchema() *DBSchema {
|
||||
}
|
||||
}
|
||||
|
||||
schema := &DBSchema{
|
||||
ver: 110000,
|
||||
t: make(map[string]*DBTableInfo),
|
||||
rm: make(map[string]map[string]*DBRel),
|
||||
return &DBInfo{
|
||||
Version: 110000,
|
||||
Tables: tables,
|
||||
Columns: columns,
|
||||
Functions: []DBFunction{},
|
||||
colMap: newColMap(tables, columns),
|
||||
}
|
||||
}
|
||||
|
||||
func GetTestSchema() (*DBSchema, error) {
|
||||
aliases := map[string][]string{
|
||||
"users": []string{"mes"},
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.addTable(t, columns[i], aliases)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.firstDegreeRels(t, columns[i])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
err := schema.secondDegreeRels(t, columns[i])
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return schema
|
||||
return NewDBSchema(GetTestDBInfo(), aliases)
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package psql
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
17 core/internal/qcode/bench.9 Normal file
@ -0,0 +1,17 @@
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/dosco/super-graph/core/internal/qcode
|
||||
BenchmarkQCompile
|
||||
BenchmarkQCompile-16 129614 8649 ns/op 3756 B/op 28 allocs/op
|
||||
BenchmarkQCompileP
|
||||
BenchmarkQCompileP-16 487488 2525 ns/op 3792 B/op 28 allocs/op
|
||||
BenchmarkParse
|
||||
BenchmarkParse-16 127582 8731 ns/op 3902 B/op 18 allocs/op
|
||||
BenchmarkParseP
|
||||
BenchmarkParseP-16 561373 2223 ns/op 3903 B/op 18 allocs/op
|
||||
BenchmarkSchemaParse
|
||||
BenchmarkSchemaParse-16 209142 5523 ns/op 3968 B/op 57 allocs/op
|
||||
BenchmarkSchemaParseP
|
||||
BenchmarkSchemaParseP-16 716437 1734 ns/op 3968 B/op 57 allocs/op
|
||||
PASS
|
||||
ok github.com/dosco/super-graph/core/internal/qcode 8.483s
|
@ -7,7 +7,8 @@ import (
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Blocklist []string
|
||||
Blocklist []string
|
||||
DefaultBlock bool
|
||||
}
|
||||
|
||||
type QueryConfig struct {
|
||||
|
@ -602,7 +602,7 @@ func (t parserType) String() string {
|
||||
// nodePool.Put(n)
|
||||
// freeList = append(freeList, Frees{n, loc})
|
||||
// } else {
|
||||
// fmt.Printf(">>>>(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
|
||||
// fmt.Printf("(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
|
||||
// }
|
||||
// }
|
||||
|
||||
|
@ -2,6 +2,7 @@ package qcode
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/chirino/graphql/schema"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@ -130,7 +131,7 @@ updateThread {
|
||||
}
|
||||
|
||||
var gql = []byte(`
|
||||
products(
|
||||
{products(
|
||||
# returns only 30 items
|
||||
limit: 30,
|
||||
|
||||
@ -148,7 +149,7 @@ var gql = []byte(`
|
||||
id
|
||||
name
|
||||
price
|
||||
}`)
|
||||
}}`)
|
||||
|
||||
func BenchmarkQCompile(b *testing.B) {
|
||||
qcompile, _ := NewCompiler(Config{})
|
||||
@ -181,3 +182,59 @@ func BenchmarkQCompileP(b *testing.B) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkParse(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for n := 0; n < b.N; n++ {
|
||||
_, err := Parse(gql)
|
||||
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkParseP(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, err := Parse(gql)
|
||||
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkSchemaParse(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for n := 0; n < b.N; n++ {
|
||||
doc := schema.QueryDocument{}
|
||||
err := doc.Parse(string(gql))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSchemaParseP(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
doc := schema.QueryDocument{}
|
||||
err := doc.Parse(string(gql))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -170,6 +170,7 @@ const (
|
||||
)
|
||||
|
||||
type Compiler struct {
|
||||
db bool // default block tables if not defined in anon role
|
||||
tr map[string]map[string]*trval
|
||||
bl map[string]struct{}
|
||||
}
|
||||
@ -179,7 +180,7 @@ var expPool = sync.Pool{
|
||||
}
|
||||
|
||||
func NewCompiler(c Config) (*Compiler, error) {
|
||||
co := &Compiler{}
|
||||
co := &Compiler{db: c.DefaultBlock}
|
||||
co.tr = make(map[string]map[string]*trval)
|
||||
co.bl = make(map[string]struct{}, len(c.Blocklist))
|
||||
|
||||
@ -413,12 +414,12 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
|
||||
|
||||
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
|
||||
var fil *Exp
|
||||
var nu bool
|
||||
var nu bool // user required (or not) in this filter
|
||||
|
||||
if trv, ok := com.tr[role][sel.Name]; ok {
|
||||
fil, nu = trv.filter(qc.Type)
|
||||
|
||||
} else if role == "anon" {
|
||||
} else if com.db && role == "anon" {
|
||||
// Tables not defined under the anon role will not be rendered
|
||||
sel.SkipRender = true
|
||||
}
|
||||
|
490 core/introspec.go Normal file
@ -0,0 +1,490 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/chirino/graphql"
|
||||
"github.com/chirino/graphql/resolvers"
|
||||
"github.com/chirino/graphql/schema"
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
)
|
||||
|
||||
var typeMap map[string]string = map[string]string{
|
||||
"smallint": "Int",
|
||||
"integer": "Int",
|
||||
"bigint": "Int",
|
||||
"smallserial": "Int",
|
||||
"serial": "Int",
|
||||
"bigserial": "Int",
|
||||
"decimal": "Float",
|
||||
"numeric": "Float",
|
||||
"real": "Float",
|
||||
"double precision": "Float",
|
||||
"money": "Float",
|
||||
"boolean": "Boolean",
|
||||
}
|
||||
|
||||
func (sg *SuperGraph) initGraphQLEgine() error {
|
||||
engine := graphql.New()
|
||||
engineSchema := engine.Schema
|
||||
dbSchema := sg.schema
|
||||
|
||||
if err := engineSchema.Parse(`enum OrderDirection { asc desc }`); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gqltype := func(col psql.DBColumn) schema.Type {
|
||||
typeName := typeMap[strings.ToLower(col.Type)]
|
||||
if typeName == "" {
|
||||
typeName = "String"
|
||||
}
|
||||
var t schema.Type = &schema.TypeName{Name: typeName}
|
||||
if col.NotNull {
|
||||
t = &schema.NonNull{OfType: t}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
query := &schema.Object{
|
||||
Name: "Query",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
mutation := &schema.Object{
|
||||
Name: "Mutation",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
engineSchema.Types[query.Name] = query
|
||||
engineSchema.Types[mutation.Name] = mutation
|
||||
engineSchema.EntryPoints[schema.Query] = query
|
||||
engineSchema.EntryPoints[schema.Mutation] = mutation
|
||||
|
||||
//validGraphQLIdentifierRegex := regexp.MustCompile(`^[A-Za-z_][A-Za-z_0-9]*$`)
|
||||
|
||||
scalarExpressionTypesNeeded := map[string]bool{}
|
||||
tableNames := dbSchema.GetTableNames()
|
||||
funcs := dbSchema.GetFunctions()
|
||||
|
||||
for _, table := range tableNames {
|
||||
ti, err := dbSchema.GetTable(table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !ti.IsSingular {
|
||||
continue
|
||||
}
|
||||
|
||||
singularName := ti.Singular
|
||||
// if !validGraphQLIdentifierRegex.MatchString(singularName) {
|
||||
// return errors.New("table name is not a valid GraphQL identifier: " + singularName)
|
||||
// }
|
||||
pluralName := ti.Plural
|
||||
// if !validGraphQLIdentifierRegex.MatchString(pluralName) {
|
||||
// return errors.New("table name is not a valid GraphQL identifier: " + pluralName)
|
||||
// }
|
||||
|
||||
outputType := &schema.Object{
|
||||
Name: singularName + "Output",
|
||||
Fields: schema.FieldList{},
|
||||
}
|
||||
engineSchema.Types[outputType.Name] = outputType
|
||||
|
||||
inputType := &schema.InputObject{
|
||||
Name: singularName + "Input",
|
||||
Fields: schema.InputValueList{},
|
||||
}
|
||||
engineSchema.Types[inputType.Name] = inputType
|
||||
|
||||
orderByType := &schema.InputObject{
|
||||
Name: singularName + "OrderBy",
|
||||
Fields: schema.InputValueList{},
|
||||
}
|
||||
engineSchema.Types[orderByType.Name] = orderByType
|
||||
|
||||
expressionTypeName := singularName + "Expression"
|
||||
expressionType := &schema.InputObject{
|
||||
Name: expressionTypeName,
|
||||
Fields: schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Name: "and",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "or",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
|
||||
},
|
||||
},
|
||||
}
|
||||
engineSchema.Types[expressionType.Name] = expressionType
|
||||
|
||||
for _, col := range ti.Columns {
|
||||
colName := col.Name
|
||||
// if !validGraphQLIdentifierRegex.MatchString(colName) {
|
||||
// return errors.New("column name is not a valid GraphQL identifier: " + colName)
|
||||
// }
|
||||
|
||||
colType := gqltype(col)
|
||||
nullableColType := ""
|
||||
if x, ok := colType.(*schema.NonNull); ok {
|
||||
nullableColType = x.OfType.(*schema.TypeName).Name
|
||||
} else {
|
||||
nullableColType = colType.(*schema.TypeName).Name
|
||||
}
|
||||
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: colName,
|
||||
Type: colType,
|
||||
})
|
||||
|
||||
for _, f := range funcs {
|
||||
if col.Type != f.Params[0].Type {
|
||||
continue
|
||||
}
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: f.Name + "_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
}
|
||||
|
||||
// If it's a numeric type...
|
||||
if nullableColType == "Float" || nullableColType == "Int" {
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "avg_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "count_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "max_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "min_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_pop_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "stddev_samp_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "variance_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "var_pop_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
outputType.Fields = append(outputType.Fields, &schema.Field{
|
||||
Name: "var_samp_" + colName,
|
||||
Type: colType,
|
||||
})
|
||||
}
|
||||
|
||||
inputType.Fields = append(inputType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: colType,
|
||||
})
|
||||
orderByType.Fields = append(orderByType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "OrderDirection"}},
|
||||
})
|
||||
|
||||
scalarExpressionTypesNeeded[nullableColType] = true
|
||||
|
||||
expressionType.Fields = append(expressionType.Fields, &schema.InputValue{
|
||||
Name: colName,
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: nullableColType + "Expression"}},
|
||||
})
|
||||
}
|
||||
|
||||
outputTypeName := &schema.TypeName{Name: outputType.Name}
|
||||
inputTypeName := &schema.TypeName{Name: inputType.Name}
|
||||
pluralOutputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: outputType.Name}}}}
|
||||
pluralInputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: inputType.Name}}}}
|
||||
|
||||
args := schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: "To sort or ordering results just use the order_by argument. This can be combined with where, search, etc to build complex queries to fit you needs."},
|
||||
Name: "order_by",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: orderByType.Name}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "where",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionType.Name}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "limit",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "offset",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "first",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "last",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "before",
|
||||
Type: &schema.TypeName{Name: "String"},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "after",
|
||||
Type: &schema.TypeName{Name: "String"},
|
||||
},
|
||||
}
|
||||
if ti.PrimaryCol != nil {
|
||||
t := gqltype(*ti.PrimaryCol)
|
||||
if _, ok := t.(*schema.NonNull); !ok {
|
||||
t = &schema.NonNull{OfType: t}
|
||||
}
|
||||
args = append(args, &schema.InputValue{
|
||||
Desc: schema.Description{Text: "Finds the record by the primary key"},
|
||||
Name: "id",
|
||||
Type: t,
|
||||
})
|
||||
}
|
||||
|
||||
if ti.TSVCol != nil {
|
||||
args = append(args, &schema.InputValue{
|
||||
Desc: schema.Description{Text: "Performs full text search using a TSV index"},
|
||||
Name: "search",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
})
|
||||
}
|
||||
|
||||
query.Fields = append(query.Fields, &schema.Field{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: singularName,
|
||||
Type: outputTypeName,
|
||||
Args: args,
|
||||
})
|
||||
query.Fields = append(query.Fields, &schema.Field{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: pluralName,
|
||||
Type: pluralOutputTypeName,
|
||||
Args: args,
|
||||
})
|
||||
|
||||
mutationArgs := append(args, schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "insert",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "update",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "upsert",
|
||||
Type: inputTypeName,
|
||||
},
|
||||
}...)
|
||||
|
||||
mutation.Fields = append(mutation.Fields, &schema.Field{
|
||||
Name: singularName,
|
||||
Args: mutationArgs,
|
||||
Type: outputType,
|
||||
})
|
||||
mutation.Fields = append(mutation.Fields, &schema.Field{
|
||||
Name: pluralName,
|
||||
Args: append(mutationArgs, schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "inserts",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "updates",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
&schema.InputValue{
|
||||
Desc: schema.Description{Text: ""},
|
||||
Name: "upserts",
|
||||
Type: pluralInputTypeName,
|
||||
},
|
||||
}...),
|
||||
Type: outputType,
|
||||
})
|
||||
}
|
||||
|
||||
for typeName := range scalarExpressionTypesNeeded {
|
||||
expressionType := &schema.InputObject{
|
||||
Name: typeName + "Expression",
|
||||
Fields: schema.InputValueList{
|
||||
&schema.InputValue{
|
||||
Name: "eq",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "neq",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "gt",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "greater_than",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lt",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lesser_than",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "gte",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "greater_or_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lte",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "lesser_or_equals",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "in",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nin",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_in",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
|
||||
&schema.InputValue{
|
||||
Name: "like",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nlike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_like",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "ilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_ilike",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "similar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "nsimilar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "not_similar",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key_any",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "has_key_all",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "contains",
|
||||
Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "contained_in",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
|
||||
},
|
||||
&schema.InputValue{
|
||||
Name: "is_null",
|
||||
Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Boolean"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
engineSchema.Types[expressionType.Name] = expressionType
|
||||
}
|
||||
|
||||
if err := engineSchema.ResolveTypes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
engine.Resolver = resolvers.Func(func(request *resolvers.ResolveRequest, next resolvers.Resolution) resolvers.Resolution {
|
||||
resolver := resolvers.MetadataResolver.Resolve(request, next)
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
resolver = resolvers.MethodResolver.Resolve(request, next) // needed by the MetadataResolver
|
||||
if resolver != nil {
|
||||
return resolver
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
sg.ge = engine
|
||||
return nil
|
||||
}
|
@ -3,7 +3,7 @@ package core
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
@ -122,7 +122,7 @@ func (sg *SuperGraph) prepareStmt(item allow.Item) error {
|
||||
|
||||
stmts, err := sg.buildRoleStmt(qb, vars, role.Name)
|
||||
if err == psql.ErrAllTablesSkipped {
|
||||
return nil
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@ -251,7 +251,7 @@ func (sg *SuperGraph) initAllowList() error {
|
||||
|
||||
// nolint: errcheck
|
||||
func stmtHash(name string, role string) string {
|
||||
h := sha1.New()
|
||||
h := sha256.New()
|
||||
io.WriteString(h, strings.ToLower(name))
|
||||
io.WriteString(h, role)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
|
@ -120,20 +120,20 @@ func buildFn(r Remote) func(http.Header, []byte) ([]byte, error) {
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if r.Debug {
|
||||
// reqDump, err := httputil.DumpRequestOut(req, true)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// if r.Debug {
|
||||
// reqDump, err := httputil.DumpRequestOut(req, true)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// resDump, err := httputil.DumpResponse(res, true)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// resDump, err := httputil.DumpResponse(res, true)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
|
||||
// reqDump, resDump)
|
||||
}
|
||||
// logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
|
||||
// reqDump, resDump)
|
||||
// }
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
return nil,
|
||||
|
@ -104,7 +104,7 @@ query {
|
||||
</div>
|
||||
|
||||
<div class="text-2xl md:text-3xl">
|
||||
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making ineffiient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on you're apps core value.
|
||||
Super Graph is a library and service that fetches data from any Postgres database using just GraphQL. No more struggling with ORMs and SQL to wrangle data out of the database. No more having to figure out the right joins or making inefficient queries. However complex the GraphQL, Super Graph will always generate just one single efficient SQL query. The goal is to save you time and money so you can focus on your app's core value.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@ -145,17 +145,12 @@ import (
|
||||
func main() {
|
||||
db, err := sql.Open("pgx", "postgres://postgrs:@localhost:5432/example_db")
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
conf, err := config.NewConfig("./config")
|
||||
sg, err := core.NewSuperGraph(nil, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
}
|
||||
|
||||
sg, err = core.NewSuperGraph(conf, db)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
graphqlQuery := `
|
||||
@ -168,7 +163,7 @@ func main() {
|
||||
|
||||
res, err := sg.GraphQL(context.Background(), graphqlQuery, nil)
|
||||
if err != nil {
|
||||
log.Fatalf(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(res.Data))
|
||||
|
@ -10,7 +10,7 @@ longTagline: Get an instant high performance GraphQL API for Postgres. No code n
|
||||
actionText: Get Started, Free, Open Source →
|
||||
actionLink: /guide
|
||||
|
||||
description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secured GraphQL API. It comes with tools to create a new app and manage it's database. You get it all, a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on you're apps core value.
|
||||
description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secured GraphQL API. It comes with tools to create a new app and manage its database. You get it all, a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.
|
||||
|
||||
features:
|
||||
- title: Simple
|
||||
|
@ -32,7 +32,7 @@ For this to work you have to ensure that the option `:domain => :all` is added t
|
||||
|
||||
### With an NGINX loadbalancer
|
||||
|
||||
If you're infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxyed to Super Graph. In the example NGINX config below all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [/microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)
|
||||
If your infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxied to Super Graph. In the example NGINX config below all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [/microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)
|
||||
|
||||
::: tip NGINX with sub-domain
|
||||
Yes, NGINX is very flexible and you can configure it to keep Super Graph on a subdomain instead of on the same top-level domain. I'm sure a little Googling will get you some great example configs for that.
|
||||
|
@ -730,6 +730,32 @@ query {
|
||||
}
|
||||
```
|
||||
|
||||
### Custom Functions
|
||||
|
||||
Any function defined in the database, like the `add_five` function below that adds 5 to any number given to it, can be used
|
||||
within your query. The one limitation is that it should be a function that only accepts a single argument. The function is used within your GraphQL query in a similar way to how the aggregations above are used. Example below
|
||||
|
||||
```graphql
|
||||
query {
|
||||
thread(id: 5) {
|
||||
id
|
||||
total_votes
|
||||
add_five_total_votes
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Postgres user-defined function `add_five`
|
||||
```
|
||||
CREATE OR REPLACE FUNCTION add_five(a integer) RETURNS integer AS $$
|
||||
BEGIN
|
||||
|
||||
RETURN a + 5;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
```
|
||||
|
||||
|
||||
In GraphQL, mutation is the operation type used when you need to modify data. Super Graph supports `insert`, `update`, `upsert` and `delete` mutations. You can also do complex nested inserts and updates.
|
||||
|
||||
When using mutations the data must be passed as variables, since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments, and your variables are passed in as those arguments.
|
||||
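A short sketch of what this looks like from the Go side, continuing the library example earlier in the guide. The `sg` value and the `context`, `encoding/json`, `fmt` and `log` imports are assumed from that example, as is the fact that the third argument of `sg.GraphQL` accepts the JSON-encoded variables (the earlier example passes `nil` there); the table and columns are purely illustrative.

```go
// The mutation itself only references a variable; the actual data travels in
// the JSON variables payload and ends up as prepared-statement arguments.
mutationQuery := `
mutation {
  product(insert: $data) {
    id
    name
  }
}`

vars := json.RawMessage(`{
  "data": { "name": "Art of Computer Programming", "price": 30.5 }
}`)

res, err := sg.GraphQL(context.Background(), mutationQuery, vars)
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(res.Data))
```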
@ -1043,7 +1069,7 @@ mutation {
|
||||
|
||||
### Pagination
|
||||
|
||||
This is a must have feature of any API. When you want your users to go thought a list page by page or implement some fancy infinite scroll you're going to need pagination. There are two ways to paginate in Super Graph.
|
||||
This is a must have feature of any API. When you want your users to go through a list page by page or implement some fancy infinite scroll you're going to need pagination. There are two ways to paginate in Super Graph.
|
||||
|
||||
Limit-Offset
|
||||
This is simple enough but also inefficient when working with a large number of total items. Limit limits the number of items fetched and offset is the point you want to fetch from. The below query will fetch 10 results at a time starting with the 100th item. You will have to keep updating offset (110, 120, 130, etc.) to walk through the results, so make offset a variable.
|
||||
@ -1059,7 +1085,7 @@ query {
|
||||
```
|
||||
|
||||
#### Cursor
|
||||
This is a powerful and highly efficient way to paginate though a large number of results. Infact it does not matter how many total results there are this will always be lighting fast. You can use a cursor to walk forward of backward though the results. If you plan to implement infinite scroll this is the option you should choose.
|
||||
This is a powerful and highly efficient way to paginate a large number of results. In fact it does not matter how many total results there are, this will always be lightning fast. You can use a cursor to walk forward or backward through the results. If you plan to implement infinite scroll this is the option you should choose.
|
||||
|
||||
When going this route the results will contain a cursor value. This is an encrypted string that you don't have to worry about: just pass it back in the next API call and you'll receive the next set of results. The cursor value is encrypted since its contents should only matter to Super Graph and not the client. Also, since the primary key is used for this feature, it's possible you might not want to leak its value to clients.
|
||||
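A hedged sketch of the cursor flow using the `first` / `after` arguments that the introspection code above registers on every query field. As before, `sg` and the imports are assumed from the earlier library example, and the variable name is illustrative; how the cursor is returned in the response follows the paragraph above.

```go
// The first request omits "after"; each later request passes back the opaque
// cursor value returned by the previous response as a variable.
query := `
query {
  products(first: 10, after: $cursor) {
    id
    name
    price
  }
}`

vars := json.RawMessage(`{ "cursor": "<cursor from the previous response>" }`)

res, err := sg.GraphQL(context.Background(), query, vars)
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(res.Data))
```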
|
||||
|
6 go.mod
@ -1,17 +1,18 @@
|
||||
module github.com/dosco/super-graph
|
||||
|
||||
require (
|
||||
github.com/DATA-DOG/go-sqlmock v1.4.1
|
||||
github.com/GeertJohan/go.rice v1.0.0
|
||||
github.com/NYTimes/gziphandler v1.1.1
|
||||
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
|
||||
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
|
||||
github.com/brianvoe/gofakeit/v5 v5.2.0
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/chirino/graphql v0.0.0-20200419184546-f015b9dab85d
|
||||
github.com/chirino/graphql v0.0.0-20200430165312-293648399b1a
|
||||
github.com/daaku/go.zipexe v1.0.1 // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/dlclark/regexp2 v1.2.0 // indirect
|
||||
github.com/dop251/goja v0.0.0-20200414142002-77e84ffb8c65
|
||||
github.com/dop251/goja v0.0.0-20200424152103-d0b8fda54cd0
|
||||
github.com/fsnotify/fsnotify v1.4.9
|
||||
github.com/garyburd/redigo v1.6.0
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
|
||||
@ -33,7 +34,6 @@ require (
|
||||
github.com/valyala/fasttemplate v1.1.0
|
||||
go.uber.org/zap v1.14.1
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
|
||||
|
19 go.sum
@ -1,6 +1,8 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM=
|
||||
github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
|
||||
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
|
||||
github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=
|
||||
@ -25,8 +27,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chirino/graphql v0.0.0-20200419184546-f015b9dab85d h1:JnYHwwRhFmQ8DeyfqmIrzpkkxnZ+iT5V1CUd3Linin0=
|
||||
github.com/chirino/graphql v0.0.0-20200419184546-f015b9dab85d/go.mod h1:+34LPrbHFfKVDPsNfi445UArMEjbeTlCm7C+OpdC7IU=
|
||||
github.com/chirino/graphql v0.0.0-20200430165312-293648399b1a h1:WVu7r2vwlrBVmunbSSU+9/3M3AgsQyhE49CKDjHiFq4=
|
||||
github.com/chirino/graphql v0.0.0-20200430165312-293648399b1a/go.mod h1:wQjjxFMFyMlsWh4Z3nMuHQtevD4Ul9UVQSnz1JOLuP8=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
@ -53,8 +55,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
|
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dop251/goja v0.0.0-20200414142002-77e84ffb8c65 h1:Nud597JuGCF/MScrb6NNVDRgmuk8X7w3pFc5GvSsm5E=
github.com/dop251/goja v0.0.0-20200414142002-77e84ffb8c65/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dop251/goja v0.0.0-20200424152103-d0b8fda54cd0 h1:EfFAcaAwGai/wlDCWwIObHBm3T2C2CCPX/SaS0fpOJ4=
github.com/dop251/goja v0.0.0-20200424152103-d0b8fda54cd0/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/friendsofgo/graphiql v0.2.2/go.mod h1:8Y2kZ36AoTGWs78+VRpvATyt3LJBx0SZXmay80ZTRWo=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -88,7 +90,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosimple/slug v1.9.0 h1:r5vDcYrFz9BmfIAMC829un9hq7hKM4cHUrsv36LbEqs=
github.com/gosimple/slug v1.9.0/go.mod h1:AMZ+sOVe65uByN3kgEyf9WEBKBCSS+dJjMX9x4vDJbg=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -187,8 +189,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI=
@@ -328,8 +330,6 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -350,7 +350,6 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
@@ -23,7 +23,7 @@ func newAction(a *Action) (http.Handler, error) {

    httpFn := func(w http.ResponseWriter, r *http.Request) {
        if err := fn(w, r); err != nil {
-            renderErr(w, err, nil)
+            renderErr(w, err)
        }
    }

@@ -45,6 +45,8 @@ type Serv struct {
    MigrationsPath string   `mapstructure:"migrations_path"`
    AllowedOrigins []string `mapstructure:"cors_allowed_origins"`
    DebugCORS      bool     `mapstructure:"cors_debug"`
+    APIPath        string   `mapstructure:"api_path"`
+    CacheControl   string   `mapstructure:"cache_control"`

    Auth  auth.Auth
    Auths []auth.Auth
@@ -26,13 +26,12 @@ var (
)

var (
-    log       *_log.Logger // logger
-    zlog      *zap.Logger  // fast logger
-    logLevel  int          // log level
-    conf      *Config      // parsed config
-    confPath  string       // path to the config file
-    db        *sql.DB      // database connection pool
-    secretKey [32]byte     // encryption key
+    log      *_log.Logger // logger
+    zlog     *zap.Logger  // fast logger
+    logLevel int          // log level
+    conf     *Config      // parsed config
+    confPath string       // path to the config file
+    db       *sql.DB      // database connection pool
)

func Cmd() {
@@ -109,7 +109,7 @@ func cmdDBNew(cmd *cobra.Command, args []string) {

    // Write new migration
    mpath := filepath.Join(migrationsPath, mname)
-    mfile, err := os.OpenFile(mpath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)
+    mfile, err := os.OpenFile(mpath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
    if err != nil {
        log.Fatalf("ERR %s", err)
    }
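The 0666 to 0600 change above restricts new migration files to owner read/write (the final mode is still subject to the process umask). A small standalone sketch of the effect; the file name is a made-up example:

package main

import (
    "fmt"
    "os"
)

func main() {
    // O_EXCL makes creation fail if the file already exists; 0600 keeps
    // the new file private to the owner instead of world-writable.
    f, err := os.OpenFile("example.sql", os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
    if err != nil {
        fmt.Println("open:", err)
        return
    }
    defer f.Close()

    if fi, err := f.Stat(); err == nil {
        fmt.Println(fi.Mode()) // typically -rw-------
    }
}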
@@ -415,6 +415,7 @@ func setFakeFuncs(f *goja.Object) {
    //f.Set("programming_language", gofakeit.ProgrammingLanguage)
}

+//nolint: errcheck
func setUtilFuncs(f *goja.Object) {
    // Slugs
    f.Set("make_slug", slug.Make)
@@ -29,7 +29,7 @@ type gqlReq struct {
}

type errorResp struct {
-    Error error `json:"error"`
+    Error string `json:"error"`
}

func apiV1Handler() http.Handler {
@@ -55,13 +55,13 @@ func apiV1(w http.ResponseWriter, r *http.Request) {

    //nolint: errcheck
    if conf.AuthFailBlock && !auth.IsAuth(ct) {
-        renderErr(w, errUnauthorized, nil)
+        renderErr(w, errUnauthorized)
        return
    }

    b, err := ioutil.ReadAll(io.LimitReader(r.Body, maxReadBytes))
    if err != nil {
-        renderErr(w, err, nil)
+        renderErr(w, err)
        return
    }
    defer r.Body.Close()
@@ -70,14 +70,14 @@ func apiV1(w http.ResponseWriter, r *http.Request) {

    err = json.Unmarshal(b, &req)
    if err != nil {
-        renderErr(w, err, nil)
+        renderErr(w, err)
        return
    }

    doLog := true
    res, err := sg.GraphQL(ct, req.Query, req.Vars)

-    if !conf.Production && res.QueryName() == "IntrospectionQuery" {
+    if !conf.Production && res.QueryName() == introspectionQuery {
        doLog = false
    }
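introspectionQuery above replaces the bare string literal; presumably it is a package-level constant along these lines (assumed, not shown in this diff):

// Assumed definition, not visible in this diff:
const introspectionQuery = "IntrospectionQuery"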
@@ -85,39 +85,42 @@ func apiV1(w http.ResponseWriter, r *http.Request) {
        log.Printf("DBG query %s: %s", res.QueryName(), res.SQL())
    }

-    if err != nil {
-        renderErr(w, err, res)
-        return
-    }
-
-    json.NewEncoder(w).Encode(res)
-
-    if doLog && logLevel >= LogLevelInfo {
-        zlog.Info("success",
-            zap.String("op", res.Operation()),
-            zap.String("name", res.QueryName()),
-            zap.String("role", res.Role()),
-        )
-    }
+    if err == nil {
+        if len(conf.CacheControl) != 0 && res.Operation() == core.OpQuery {
+            w.Header().Set("Cache-Control", conf.CacheControl)
+        }
+        //nolint: errcheck
+        json.NewEncoder(w).Encode(res)
+
+        if doLog && logLevel >= LogLevelInfo {
+            zlog.Info("success",
+                zap.String("op", res.OperationName()),
+                zap.String("name", res.QueryName()),
+                zap.String("role", res.Role()),
+            )
+        }
+
+    } else {
+        renderErr(w, err)
+
+        if doLog && logLevel >= LogLevelInfo {
+            zlog.Error("error",
+                zap.String("op", res.OperationName()),
+                zap.String("name", res.QueryName()),
+                zap.String("role", res.Role()),
+                zap.Error(err),
+            )
+        }
+    }
}

//nolint: errcheck
-func renderErr(w http.ResponseWriter, err error, res *core.Result) {
+func renderErr(w http.ResponseWriter, err error) {
    if err == errUnauthorized {
        w.WriteHeader(http.StatusUnauthorized)
    }

-    json.NewEncoder(w).Encode(&errorResp{err})
-
-    if logLevel >= LogLevelError {
-        if res != nil {
-            zlog.Error(err.Error(),
-                zap.String("op", res.Operation()),
-                zap.String("name", res.QueryName()),
-                zap.String("role", res.Role()),
-            )
-        } else {
-            zlog.Error(err.Error())
-        }
-    }
+    json.NewEncoder(w).Encode(errorResp{err.Error()})
}
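A note on why errorResp switched from error to string: Go errors created with errors.New marshal to an empty JSON object, because the underlying errorString type has no exported fields. A standalone sketch (not part of the diff) showing the fixed response shape:

package main

import (
    "encoding/json"
    "errors"
    "os"
)

type errorResp struct {
    Error string `json:"error"`
}

func main() {
    err := errors.New("not authorized")

    // Encoding the message string yields {"error":"not authorized"};
    // encoding the error value itself would have printed {"error":{}}.
    json.NewEncoder(os.Stdout).Encode(errorResp{err.Error()})
}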
@@ -100,6 +100,9 @@ func initConf() (*Config, error) {
        c.UseAllowList = true
    }

+    // In anon role block all tables that are not defined in the role
+    c.DefaultBlock = true
+
    return c, nil
}
@@ -1,37 +0,0 @@
package serv

import "net/http"

//nolint: errcheck
func introspect(w http.ResponseWriter) {
    w.Header().Set("Content-Type", "application/json")
    w.Write([]byte(`{
    "data": {
        "__schema": {
            "queryType": {
                "name": "Query"
            },
            "mutationType": null,
            "subscriptionType": null
        }
    },
    "extensions":{
        "tracing":{
            "version":1,
            "startTime":"2019-06-04T19:53:31.093Z",
            "endTime":"2019-06-04T19:53:31.108Z",
            "duration":15219720,
            "execution": {
                "resolvers": [{
                    "path": ["__schema"],
                    "parentType": "Query",
                    "fieldName": "__schema",
                    "returnType": "__Schema!",
                    "startOffset": 50950,
                    "duration": 17187
                }]
            }
        }
    }
}`))
}
@@ -6,6 +6,7 @@ import (
    "net/http"
    "os"
    "os/signal"
+    "path"
    "strings"
    "time"

@@ -111,9 +112,15 @@ func routeHandler() (http.Handler, error) {
        return mux, nil
    }

+    apiRoute := "/api/v1/graphql"
+
+    if len(conf.APIPath) != 0 {
+        apiRoute = path.Join("/", conf.APIPath, "/v1/graphql")
+    }
+
    routes := map[string]http.Handler{
-        "/health":         http.HandlerFunc(health),
-        "/api/v1/graphql": apiV1Handler(),
+        "/health": http.HandlerFunc(health),
+        apiRoute:  apiV1Handler(),
    }

    if err := setActionRoutes(routes); err != nil {
@@ -137,6 +144,7 @@ func routeHandler() (http.Handler, error) {

    fn := func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Server", serverName)
+        w.Header().Set("Content-type", "application/json")
        mux.ServeHTTP(w, r)
    }

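To illustrate the new api_path handling above: path.Join cleans stray slashes in the configured prefix, so a setting of "/data" moves the endpoint to /data/v1/graphql. A minimal sketch; the config value here is a made-up example:

package main

import (
    "fmt"
    "path"
)

func main() {
    apiRoute := "/api/v1/graphql" // default when api_path is unset

    apiPath := "/data" // hypothetical api_path config value
    if len(apiPath) != 0 {
        apiRoute = path.Join("/", apiPath, "/v1/graphql")
    }

    fmt.Println(apiRoute) // /data/v1/graphql
}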
@@ -46,6 +46,13 @@ cors_allowed_origins: ["*"]
# Debug Cross Origin Resource Sharing requests
cors_debug: false

+# Default API path prefix is /api; you can change it if you like
+# api_path: "/data"
+
+# Cache-Control header can help cache queries if your CDN supports cache-control
+# on POST requests (does not work with mutations)
+# cache_control: "public, max-age=300, s-maxage=600"
+
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@@ -49,6 +49,13 @@ reload_on_config_change: false
# Debug Cross Origin Resource Sharing requests
# cors_debug: false

+# Default API path prefix is /api; you can change it if you like
+# api_path: "/data"
+
+# Cache-Control header can help cache queries if your CDN supports cache-control
+# on POST requests (does not work with mutations)
+# cache_control: "public, max-age=300, s-maxage=600"
+
# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@@ -2,7 +2,7 @@ package serv

import (
    "bytes"
-    "crypto/sha1"
+    "crypto/sha256"
    "encoding/hex"
    "io"
    "os"
@@ -16,7 +16,7 @@ import (
// nolint: errcheck
func gqlHash(b string, vars []byte, role string) string {
    b = strings.TrimSpace(b)
-    h := sha1.New()
+    h := sha256.New()
    query := "query"

    s, e := 0, 0
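The switch from crypto/sha1 to crypto/sha256 above widens the digest from 20 to 32 bytes, so any previously stored query hashes will no longer match. A simplified sketch of the hashing step only; the real gqlHash also normalizes the query text before writing it to the digest:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// hashOf is a simplified stand-in for gqlHash: hash the query,
// variables, and role together into one hex digest.
func hashOf(query string, vars []byte, role string) string {
    h := sha256.New()
    h.Write([]byte(query))
    h.Write(vars)
    h.Write([]byte(role))
    return hex.EncodeToString(h.Sum(nil)) // 64 hex chars vs SHA-1's 40
}

func main() {
    fmt.Println(hashOf(`{ users { id } }`, nil, "anon"))
}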
112 jsn/clear.go Normal file
@@ -0,0 +1,112 @@
package jsn

import (
    "bytes"
    "encoding/json"
    "io"
)

// Clear function wipes all scalar values from the json including those directly in an array
func Clear(w *bytes.Buffer, v []byte) error {
    dec := json.NewDecoder(bytes.NewReader(v))

    st := newIntStack()
    isValue := false
    inArray := false
    n := 0

    for {
        var t json.Token
        var err error

        if t, err = dec.Token(); err == io.EOF {
            break
        } else if err != nil {
            return err
        }

        switch v1 := t.(type) {
        case int:
            if isValue && !inArray {
                w.WriteByte('0')
                isValue = false
                n++
            }

        case float64:
            if isValue && !inArray {
                w.WriteString(`0.0`)
                isValue = false
                n++
            }

        case bool:
            if isValue && !inArray {
                w.WriteString(`false`)
                isValue = false
                n++
            }

        case json.Number:
            if isValue && !inArray {
                w.WriteString(`0`)
                isValue = false
                n++
            }

        case nil:
            if isValue && !inArray {
                w.WriteString(`null`)
                isValue = false
                n++
            }

        case string:
            if !isValue {
                if n != 0 {
                    w.WriteByte(',')
                }

                io := int(dec.InputOffset())
                w.Write(v[io-len(v1)-2 : io])
                w.WriteString(`:`)
                isValue = true

            } else if !inArray {
                w.WriteString(`""`)
                isValue = false
                n++
            }

        case json.Delim:
            switch t.(json.Delim) {
            case '[':
                st.Push(n)
                inArray = true
                n = 0
            case ']':
                n = st.Pop()
                inArray = false
                isValue = false
                n++
            case '{':
                if n != 0 && !isValue {
                    w.WriteByte(',')
                }
                st.Push(n)
                inArray = false
                isValue = false
                n = 0
            case '}':
                n = st.Pop()
                isValue = false
                n++
            }
            w.WriteByte(v[dec.InputOffset()-1])
        }

        dec.More()
    }

    return nil
}
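A usage sketch for the new Clear function, assuming the jsn package import path follows the repo module path:

package main

import (
    "bytes"
    "fmt"

    "github.com/dosco/super-graph/jsn"
)

func main() {
    var buf bytes.Buffer
    in := []byte(`{"name": "Hello", "tags": [1, 2], "meta": {"ok": true}}`)

    if err := jsn.Clear(&buf, in); err != nil {
        panic(err)
    }

    // Scalars collapse to zero values and array contents are dropped,
    // keeping only the shape of the document:
    // {"name":"","tags":[],"meta":{"ok":false}}
    fmt.Println(buf.String())
}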
47 jsn/intstack.go Normal file
@@ -0,0 +1,47 @@
package jsn

type intStack struct {
    stA [20]int
    st  []int
    top int
}

// Create a new intStack
func newIntStack() *intStack {
    s := &intStack{top: -1}
    s.st = s.stA[:0]
    return s
}

// Return the number of items in the intStack
func (s *intStack) Len() int {
    return (s.top + 1)
}

// View the top item on the intStack
func (s *intStack) Peek() int {
    if s.top == -1 {
        return -1
    }
    return s.st[s.top]
}

// Pop the top item of the intStack and return it
func (s *intStack) Pop() int {
    if s.top == -1 {
        return -1
    }

    s.top--
    return s.st[(s.top + 1)]
}

// Push a value onto the top of the intStack
func (s *intStack) Push(value int) {
    s.top++
    if len(s.st) <= s.top {
        s.st = append(s.st, value)
    } else {
        s.st[s.top] = value
    }
}
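How Clear uses this stack, in brief: the running sibling count n is pushed on every '{' or '[' and restored on the matching close, which is what keeps comma placement correct across nesting levels. A small in-package sketch (the function name is illustrative only):

package jsn

func exampleIntStack() int {
    st := newIntStack()
    st.Push(3) // entering a nested object: remember 3 siblings written so far
    st.Push(0) // one level deeper
    _ = st.Pop()    // leave the inner level
    return st.Pop() // 3: the outer level's sibling count is restored
}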
@@ -509,6 +509,34 @@ func TestKeys3(t *testing.T) {
    }
}

+func TestClear(t *testing.T) {
+    var buf bytes.Buffer
+
+    json := `{
+        "insert": {
+            "created_at": "now",
+            "test_1a": { "type1": "a", "type2": [{ "a": 2 }] },
+            "name": "Hello",
+            "updated_at": "now",
+            "description": "World"
+        },
+        "user": 123,
+        "tags": [1, 2, "what"]
+    }`
+
+    expected := `{"insert":{"created_at":"","test_1a":{"type1":"","type2":[{"a":0.0}]},"name":"","updated_at":"","description":""},"user":0.0,"tags":[]}`
+
+    err := Clear(&buf, []byte(json))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if buf.String() != expected {
+        t.Log(buf.String())
+        t.Error("Does not match expected json")
+    }
+}
+
func BenchmarkGet(b *testing.B) {
    b.ReportAllocs()

@@ -10,7 +10,7 @@ func Keys(b []byte) [][]byte {
    var k []byte
    state := expectValue

-    st := newStack()
+    st := newSkipInfoStack()
    ae := 0
    instr := false
    slash := 0
51 jsn/sistack.go Normal file
@@ -0,0 +1,51 @@
package jsn

type skipInfo struct {
    ss, se int
}

type siStack struct {
    stA [20]skipInfo
    st  []skipInfo
    top int
}

// Create a new siStack
func newSkipInfoStack() *siStack {
    s := &siStack{top: -1}
    s.st = s.stA[:0]
    return s
}

// Return the number of items in the siStack
func (s *siStack) Len() int {
    return (s.top + 1)
}

// View the top item on the siStack
func (s *siStack) Peek() *skipInfo {
    if s.top == -1 {
        return nil
    }
    return &s.st[s.top]
}

// Pop the top item of the siStack and return it
func (s *siStack) Pop() *skipInfo {
    if s.top == -1 {
        return nil
    }

    s.top--
    return &s.st[(s.top + 1)]
}

// Push a value onto the top of the siStack
func (s *siStack) Push(value skipInfo) {
    s.top++
    if len(s.st) <= s.top {
        s.st = append(s.st, value)
    } else {
        s.st[s.top] = value
    }
}
51 jsn/stack.go
@@ -1,51 +0,0 @@
package jsn

type skipInfo struct {
    ss, se int
}

type stack struct {
    stA [20]skipInfo
    st  []skipInfo
    top int
}

// Create a new stack
func newStack() *stack {
    s := &stack{top: -1}
    s.st = s.stA[:0]
    return s
}

// Return the number of items in the stack
func (s *stack) Len() int {
    return (s.top + 1)
}

// View the top item on the stack
func (s *stack) Peek() *skipInfo {
    if s.top == -1 {
        return nil
    }
    return &s.st[s.top]
}

// Pop the top item of the stack and return it
func (s *stack) Pop() *skipInfo {
    if s.top == -1 {
        return nil
    }

    s.top--
    return &s.st[(s.top + 1)]
}

// Push a value onto the top of the stack
func (s *stack) Push(value skipInfo) {
    s.top++
    if len(s.st) <= s.top {
        s.st = append(s.st, value)
    } else {
        s.st[s.top] = value
    }
}