Compare commits
119 Commits
SHA1
---
e102da839e
68a378c00f
d96eaf14f4
01e488b69d
7a450b16ba
1ad8cbf15b
f69f1c67d5
a172193955
81338b6123
265b93b203
6c240e21b4
7930719eaa
cc687b1b2b
3033dcf1a9
0381982d19
2b0a798faa
8b6c562ac1
a1fb89b762
c82a7bff0d
7acf28bb3c
be5d4e976a
d1b884aec6
4be4ce860b
dfa4caf540
7763251fb7
51e105699e
90694f8803
ad82f5b267
99b37a9c50
7ec1f59224
d3ecb1d6cc
aed4170e8e
c33e93ab37
3d3e5d9c2b
67b4a4d945
7413813138
12007db76e
c85d379fe2
62fd1eac55
1a3d74e1ce
3a4d885987
3bd9b199dd
4ffa1483a4
52f3b1c7a2
2d466bfb12
a0b8907c3c
8097ca3b8f
0e498b0e94
3eb5b83070
e3c94d17d1
7240b27214
f37d867e32
5e75cc7b83
d4dca86267
76340ab008
3f5727c22b
7c02226016
1e31e33707
0d0d63d8d1
c40ff38b05
7a5cf47486
5803395bd5
a40bd7fca5
343589c3bd
482203ba05
6831d3f56f
96ed3413fc
bf4c496756
66055516d2
2d3e3cbae1
0e16eee93b
679dd1fc83
3a14a644ce
5da79d91bf
5aceb337d6
5593c66996
0f9f8bbf0d
cbfedb6fd2
37f2417c0b
768e8774e7
9140e597e1
94593c9cce
a96c211fe5
6d47f0df8e
e82bdbed65
ef51b99fc7
9ebd03fa8c
aff2a13ba4
f518d5fc69
7583326d21
8024a8420b
6bd27d7c79
a4c09dedd5
176514b5f1
396f4bcfd8
51c5f037f4
c576f1ae16
0a6995eaca
06ed823a51
3438c3efb9
605ec63466
c7e63a6200
89bc93e159
77a51924a7
0deb3596c5
b87ba1fcd0
8ae7210e70
f8aac8d4d7
34867a2733
4a8af69dd0
c74226208d
6d2f334011
ff13f651d6
cabd2d81ae
4edc15eb98
6bc66d28bc
c797deb4d0
deb5b93c81
85a74ed30c
49  .chglog/CHANGELOG.tpl.md  Executable file
@@ -0,0 +1,49 @@
{{ if .Versions -}}
<a name="unreleased"></a>
## [Unreleased]

{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>
## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}

{{- if .MergeCommits -}}
### Pull Requests
{{ range .MergeCommits -}}
- {{ .Header }}
{{ end }}
{{ end -}}

{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}
27  .chglog/config.yml  Executable file
@@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
  title: CHANGELOG
  repository_url: https://github.com/dosco/super-graph
options:
  commits:
    # filters:
    #   Type:
    #     - feat
    #     - fix
    #     - perf
    #     - refactor
  commit_groups:
    # title_maps:
    #   feat: Features
    #   fix: Bug Fixes
    #   perf: Performance Improvements
    #   refactor: Code Refactoring
  header:
    pattern: "^((\\w+)\\s.*)$"
    pattern_maps:
      - Subject
      - Type
  notes:
    keywords:
      - BREAKING CHANGE
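The `header.pattern` above is what git-chglog uses to split each commit subject into the `Subject` and `Type` fields named in `pattern_maps`. A quick standalone Go illustration (not part of this changeset) of what that regex captures:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in .chglog/config.yml: group 1 -> Subject, group 2 -> Type.
	re := regexp.MustCompile(`^((\w+)\s.*)$`)

	m := re.FindStringSubmatch("Fix bug with compiling anon queries")
	fmt.Println("Subject:", m[1]) // "Fix bug with compiling anon queries"
	fmt.Println("Type:   ", m[2]) // "Fix"; this is why CHANGELOG.md groups entries by first word
}
```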
24  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file
@@ -0,0 +1,24 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

<!-- If you suspect this could be a bug, follow the template. -->

### What version of Super Graph are you using? `super-graph version`


### Have you tried reproducing the issue with the latest release?


### What is the hardware spec (RAM, OS)?


### Steps to reproduce the issue (config used to run Super Graph).


### Expected behaviour and actual result.
12  .github/ISSUE_TEMPLATE/documentation.md  vendored  Normal file
@@ -0,0 +1,12 @@
---
name: Documentation
about: Suggest how we can improve documentation
title: ''
labels: bug, docs
assignees: ''

---

<!-- If you think the Super Graph documentation falls short https://supergraph.dev/guide.html please suggest ways we can improve it. -->

<!-- explain it here. -->
14  .github/ISSUE_TEMPLATE/feature_request.md  vendored  Normal file
@@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

<!-- Please only use this template for submitting feature requests -->

**What would you like to be added**:

**Why is this needed**:
9  .gitignore  vendored
@@ -27,4 +27,13 @@
main
.DS_Store
.swp
.release
main
super-graph
supergraph
*-fuzz.zip
crashers
suppressions
release
.gofuzz
*-fuzz.zip
13  .wtc.yaml  Normal file
@@ -0,0 +1,13 @@
no_trace: false
debounce: 300 # if rule has no debounce, this will be used instead
ignore: \.git/
trig: [start, run] # will run on start
rules:
  - name: start
  - name: run
    match: \.go$
    ignore: web|examples|docs|_test\.go$
    command: go run main.go serv
  - name: test
    match: _test\.go$
    command: go test -cover {PKG}
424  CHANGELOG.md  Normal file
@@ -0,0 +1,424 @@
<a name="unreleased"></a>
## [Unreleased]

### Add
- Add config driven custom table relationships
- Add support for `websearch_to_tsquery` in PG 11

### Create
- Create CODE_OF_CONDUCT.md

### Fix
- Fix bug with remote join example
- Fix grammar / syntax

### Update
- Update issue templates
- Update CONTRIBUTING.md
- Update issue templates
- Update feature_request.md


<a name="v0.12.6"></a>
## [v0.12.6] - 2019-12-02
### Add
- Add support for `websearch_to_tsquery` in PG 11


<a name="v0.12.5"></a>
## [v0.12.5] - 2019-11-30
### Add
- Add a guide to the internals of the codebase
- Add a CONTRIBUTING.md guide for contributors
- Add a CHANGELOG.md
- Add issue templates

### Fix
- Fix for missing filters on nested selectors

### Refactor
- Refactor rename `Select.Table` to `Select.Name`


<a name="v0.12.4"></a>
## [v0.12.4] - 2019-11-28
### Move
- Move license from MIT to Apache 2.0. Add Makefile


<a name="v0.12.3"></a>
## [v0.12.3] - 2019-11-26
### Added
- Added support for query names to the allow.list


<a name="v0.12.2"></a>
## [v0.12.2] - 2019-11-25
### Fix
- Fix bug with compiling anon queries


<a name="v0.12.1"></a>
## [v0.12.1] - 2019-11-22
### Move
- Move sql query logging from info to debug


<a name="v0.12.0"></a>
## [v0.12.0] - 2019-11-22
### Use
- Use logger error instead of panic in goja handlers


<a name="v0.11.9"></a>
## [v0.11.9] - 2019-11-22
### Add
- Add a db:reset command only for dev mode


<a name="v0.11.8"></a>
## [v0.11.8] - 2019-11-21
### Optimize
- Optimize db queries limit use of transactions


<a name="v0.11.7"></a>
## [v0.11.7] - 2019-11-19
### Added
- Added support for multi-root queries


<a name="v0.11.6"></a>
## [v0.11.6] - 2019-11-15
### Fix
- Fix issues with JWT auth
- Fix bug with migration filename generation
- Fix bug with migration file name


<a name="v0.11.5"></a>
## [v0.11.5] - 2019-11-10
### Fix
- Fix bug with migration template name


<a name="v0.11.4"></a>
## [v0.11.4] - 2019-11-10
### Fix
- Fix bug with creating new migrations


<a name="v0.11.3"></a>
## [v0.11.3] - 2019-11-09
### Fix
- Fix macro syntax bug in app templates


<a name="v0.11.2"></a>
## [v0.11.2] - 2019-11-07
### Fix
- Fix bugs and add new production mode


<a name="v0.11.1"></a>
## [v0.11.1] - 2019-11-05
### Add
- Add nested where clause to filter based on related tables

### Block
- Block unauthorized requests when 'anon' role is not defined

### Update
- Update docs and website with new features


<a name="v0.11"></a>
## [v0.11] - 2019-11-01
### Add
- Add config driven presets for insert, update and upsert
- Add RBAC option to disable functions eg. count
- Add fuzz testing to 'serv' for the GQL hash parser
- Add fuzz testing to 'jsn' and 'qcode'
- Add ability to block queries and mutations by role
- Add built in 'anon' and 'user' roles
- Add role based access control

### Allow
- Allow config files to inherit from other config files

### Change
- Change config key inherit to inherits

### Get
- Get RBAC working for queries and mutations

### Optimize
- Optimize prepared statement flow for RBAC

### Preserve
- Preserve allow.list ordering on save

### Update
- Update filters section in guide

### Pull Requests
- Merge pull request [#11](https://github.com/dosco/super-graph/issues/11) from dosco/rbac


<a name="v0.10.1"></a>
## [v0.10.1] - 2019-10-06
### Add
- Add ability to set filters per operation / action
- Add upsert mutation

### Pull Requests
- Merge pull request [#10](https://github.com/dosco/super-graph/issues/10) from FourSigma/sm-examples-folder


<a name="v0.10"></a>
## [v0.10] - 2019-10-04
### Fix
- Fix return values for bulk mutations and delete
- Fix issues with mutation SQL
- Fix broken demo app
- Fix typo in 'across'

### Remove
- Remove extra link from README

### Update
- Update docs, getting started guide and mutations

### Pull Requests
- Merge pull request [#6](https://github.com/dosco/super-graph/issues/6) from muesli/typo-fixes


<a name="v0.9"></a>
## [v0.9] - 2019-10-01
### Fix
- Fix demo rails app broken build


<a name="v0.8"></a>
## [v0.8] - 2019-09-30
### Fix
- Fix invalid import bug

### Update
- Update documentation site


<a name="v0.7"></a>
## [v0.7] - 2019-09-29
### Failure
- Failure to prepare statements should be a warning

### Fix
- Fix duplicate column bug


<a name="v0.6"></a>
## [v0.6] - 2019-09-29
### Add
- Add database setup commands
- Add binary compression back to Dockerfile
- Add initialization command to setup new apps
- Add migrate command
- Add database seeding capability
- Add session variable for user id
- Add delete mutation
- Add update mutation
- Add insert mutation with bulk insert
- Add GoTO Aug, 19 presentation
- Add support for prepared statements
- Add end-to-end benchmarking
- Add object pooling for parser expressions
- Add request / response debugging for remote joins
- Add a presentation about GraphQL
- Add validation for remote JSON
- Add tracing for API stitching
- Add REST API stitching
- Add SQL query caching
- Add support for GraphQL variables
- Add fuzz testing to qcode
- Add test for Rails Redis cookie store integration
- Add an install guide

### Change
- Change fuzz test name to qcode
- Change logo from PNG to SVG

### Enable
- Enable reload on config change

### Fix
- Fix missing config name bug
- Fix new app templates
- Fix help message for migrate
- Fix session variable bug
- Fix test failures in `psql` and `serv`
- Fix demo docker services startup order
- Fix wrong value for false token bug. Reported by [@ThisIsMissEm](https://github.com/ThisIsMissEm)
- Fix allow.list file discovery bug
- Fix bug with allow list path
- Fix wrong value for use_allow_list in dev config
- Fix startup bug in demo script
- Fix url bug in allow list
- Fix bug [#676](https://github.com/dosco/super-graph/issues/676) found by fuzzer
- Fix race-condition in remote joins
- Fix cookie passing in web ui
- Fix bug with passing cookies in web ui
- Fix null pointer with invalid argument values
- Fix infinite loop bug in lexer
- Fix null pointer issue found by fuzz test
- Fix issue with fuzzbuzz config
- Fix demo to run as memory only
- Fix auth documentation
- Fix issue with web ui sizing
- Fix issue preventing docker-compose deploy
- Fix try demo documentation

### Further
- Further reduce allocations across hot paths
- Further reduce allocations on the compiler hot path
- Further optimize json parsing and editing performance

### Highlight
- Highlight top features better on the site

### Improve
- Improve readability of json parser code
- Improve the motivation section in the readme
- Improve the demo experience

### Make
- Make remote joins use parallel http requests

### Merge
- Merge branch 'master' into optimize-psql

### New
- New low allocation fast json parsing and editing library

### Optimize
- Optimize lexer and fix bugs
- Optimize the sql generator hot path

### Reduce
- Reduce allocations done by the stack
- Reduce steps to run the demo
- Reduce allocations and improve perf over 50%

### Remove
- Remove unused packages
- Remove the 'hello' test app folder
- Remove other allocations in psql

### Use
- Use hashes as ids for table relationships

### Watch
- Watch and reload on config changes


<a name="v0.5"></a>
## [v0.5] - 2019-04-10
### Add
- Add support for new Rails 5.2 aes-256-gcm cookies
- Add query support for ts_rank and ts_headline
- Add full text search support using TSV indexes
- Add missing assets folder
- Add fetch by ID feature
- Add documentation

### Cleanup
- Cleanup and redesign config files

### Fix
- Fix bug with auth config parsing

### Redesign
- Redesign config file architecture

### Reduce
- Reduce realloc of maps and slices

### Update
- Update docs with full-text search information


<a name="v0.4"></a>
## [v0.4] - 2019-04-01

<a name="v0.3"></a>
## [v0.3] - 2019-04-01
### Add
- Add SQL execution timing and tracing
- Add support for HAVING with aggregate queries
- Add aggregate functions to GQL queries
- Add Auth0 JWT support
- Add React UI building to the docker build flow
- Add compiler profiling
- Add benchmarks for GQL to SQL compile
- Add tests for gql to sql compile

### Cleanup
- Cleanup Dockerfile

### Fix
- Fix recurring packer issue docker hub builds
- Fix issue with asset packer breaking Docker builds
- Fix missing git package in Dockerfile
- Fix docker ignore values
- Fix image build failure on docker hub
- Fix build issue in Dockerfile
- Fix bugs and document the 'where' clause
- Fix perf issue with inflections

### Optimize
- Optimize docker image

### Pack
- Pack web UI with app into a single binary

### Upgrade
- Upgrade web UI packages


<a name="0.3"></a>
## 0.3 - 2019-03-24
### First
- First commit

### Fix
- Fix license to MIT


[Unreleased]: https://github.com/dosco/super-graph/compare/v0.12.6...HEAD
[v0.12.6]: https://github.com/dosco/super-graph/compare/v0.12.5...v0.12.6
[v0.12.5]: https://github.com/dosco/super-graph/compare/v0.12.4...v0.12.5
[v0.12.4]: https://github.com/dosco/super-graph/compare/v0.12.3...v0.12.4
[v0.12.3]: https://github.com/dosco/super-graph/compare/v0.12.2...v0.12.3
[v0.12.2]: https://github.com/dosco/super-graph/compare/v0.12.1...v0.12.2
[v0.12.1]: https://github.com/dosco/super-graph/compare/v0.12.0...v0.12.1
[v0.12.0]: https://github.com/dosco/super-graph/compare/v0.11.9...v0.12.0
[v0.11.9]: https://github.com/dosco/super-graph/compare/v0.11.8...v0.11.9
[v0.11.8]: https://github.com/dosco/super-graph/compare/v0.11.7...v0.11.8
[v0.11.7]: https://github.com/dosco/super-graph/compare/v0.11.6...v0.11.7
[v0.11.6]: https://github.com/dosco/super-graph/compare/v0.11.5...v0.11.6
[v0.11.5]: https://github.com/dosco/super-graph/compare/v0.11.4...v0.11.5
[v0.11.4]: https://github.com/dosco/super-graph/compare/v0.11.3...v0.11.4
[v0.11.3]: https://github.com/dosco/super-graph/compare/v0.11.2...v0.11.3
[v0.11.2]: https://github.com/dosco/super-graph/compare/v0.11.1...v0.11.2
[v0.11.1]: https://github.com/dosco/super-graph/compare/v0.11...v0.11.1
[v0.11]: https://github.com/dosco/super-graph/compare/v0.10.1...v0.11
[v0.10.1]: https://github.com/dosco/super-graph/compare/v0.10...v0.10.1
[v0.10]: https://github.com/dosco/super-graph/compare/v0.9...v0.10
[v0.9]: https://github.com/dosco/super-graph/compare/v0.8...v0.9
[v0.8]: https://github.com/dosco/super-graph/compare/v0.7...v0.8
[v0.7]: https://github.com/dosco/super-graph/compare/v0.6...v0.7
[v0.6]: https://github.com/dosco/super-graph/compare/v0.5...v0.6
[v0.5]: https://github.com/dosco/super-graph/compare/v0.4...v0.5
[v0.4]: https://github.com/dosco/super-graph/compare/v0.3...v0.4
[v0.3]: https://github.com/dosco/super-graph/compare/0.3...v0.3
3  CODE_OF_CONDUCT.md  Normal file
@@ -0,0 +1,3 @@
# Code of Conduct

Be excellent to each other. Treat others the way you'd like to be treated. We are all here to learn, build great software and make new friends.
82  CONTRIBUTING.md  Normal file
@@ -0,0 +1,82 @@
# Contributing to Super Graph

Super Graph is a very approachable code-base and a project that is easy for almost
anyone with basic Go knowledge to start contributing to. It is also a young project,
so a lot of high value work is there for the taking.

Even the GraphQL to SQL compiler that is at the heart of Super Graph is essentially a textbook compiler with clean and easy to read code. The data structures used by the lexer, parser and sql generator are easy to understand and modify.

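If you have never looked inside such a compiler, the core idea is small enough to sketch. The toy program below is purely illustrative (it is not the actual super-graph code, and the names are made up): it compiles a table name and a field selection into a single Postgres statement that returns rows as JSON, which is the basic trick behind answering a GraphQL query with one SQL query.

```go
package main

import (
	"fmt"
	"strings"
)

// toSQL compiles a (table, selected fields) pair into one Postgres query
// that returns the matching rows as a JSON array. Resolving a whole GraphQL
// selection set this way, instead of issuing a query per field, is the
// essence of the compile step described above.
func toSQL(table string, fields []string) string {
	pairs := make([]string, len(fields))
	for i, f := range fields {
		pairs[i] = fmt.Sprintf("'%s', t.%s", f, f)
	}
	return fmt.Sprintf(
		"SELECT json_agg(json_build_object(%s)) FROM %s AS t",
		strings.Join(pairs, ", "), table)
}

func main() {
	// { products { id name } } becomes a single SQL statement.
	fmt.Println(toSQL("products", []string{"id", "name"}))
}
```
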
Finally, we do have a lot of tests for critical parts of the codebase, which makes it easy for you to modify with confidence. I'm always available for questions or any sort of guidance, so feel free to reach out over Twitter or Discord.

* [Getting Started](#getting-started)
* [Setting Up the Development Environment](#setup-development-environment)
  * [Prerequisites](#prerequisites)
  * [Get the Super Graph source](#get-source-code)
  * [Start the development environment](#start-the-development-environment)
  * [Testing](#testing-and-linting)
* [Contributing](#contributing)
  * [Guidelines](#guidelines)
  * [Code style](#code-style)

## Getting Started

- Read the [Getting Started Guide](https://supergraph.dev/guide.html#get-started)

## Setup Development Environment

### Prerequisites

- Install [Git](https://git-scm.com/) (may be already installed on your system, or available through your OS package manager)
- Install [Go 1.13 or above](https://golang.org/doc/install)
- Install [Docker](https://docs.docker.com/v17.09/engine/installation/)

### Get source code

The entire build flow uses `Makefile`; there is a whole list of sub-commands you
can use to build, test, install, lint, etc.

```bash
git clone https://github.com/dosco/super-graph
cd ./super-graph
make help
```

### Start the development environment

The entire development flow is packaged into a `docker-compose` workflow. The below `up` command will launch a Postgres database, an example e-commerce app in Rails and Super Graph in development mode. The `db:seed` Rails task will insert sample data into Postgres.

```bash
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
docker-compose up
```

### Learn how the code works

[Super Graph codebase explained](https://supergraph.dev/internals.html)

### Testing and Linting

```
make lint test
```

## Contributing

### Guidelines

- **Pull requests are welcome**, as long as you're willing to put in the effort to meet the guidelines.
- Aim for clear, well written, maintainable code.
- Simple and minimal approach to features, like Go.
- Refactoring existing code now for better performance, better readability or better testability wins over adding a new feature.
- Don't add a function to a module that you don't use right now, or that doesn't clearly enable planned functionality.
- Don't ship a half-done feature, which would require significant alterations to work fully.
- Avoid [technical debt](https://en.wikipedia.org/wiki/Technical_debt) like cancer.
- Leave the code cleaner than when you began.

### Code style

- We're following [Go Code Review](https://github.com/golang/go/wiki/CodeReviewComments).
- Use `go fmt` to format your code before committing.
- If you see *any code* which clearly violates the style guide, please fix it and send a pull request. No need to ask for permission.
- Avoid unnecessary vertical spaces. Use your judgment or follow the code review comments.
- Wrap your code and comments to 100 characters, unless doing so makes the code less legible.
27  Dockerfile
@@ -6,14 +6,18 @@ RUN yarn
RUN yarn build

# stage: 2
FROM golang:1.13beta1-alpine as go-build
FROM golang:1.14-alpine as go-build
RUN apk update && \
    apk add --no-cache make && \
    apk add --no-cache git && \
    apk add --no-cache jq && \
    apk add --no-cache upx=3.95-r2

RUN go get -u github.com/shanzi/wu && \
    go install github.com/shanzi/wu && \
    go get github.com/GeertJohan/go.rice/rice
RUN GO111MODULE=off go get -u github.com/rafaelsq/wtc

ARG SOPS_VERSION=3.5.0
ADD https://github.com/mozilla/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux /usr/local/bin/sops
RUN chmod 755 /usr/local/bin/sops

WORKDIR /app
COPY . /app
@@ -21,11 +25,9 @@ COPY . /app
RUN mkdir -p /app/web/build
COPY --from=react-build /web/build/ ./web/build/

ENV GO111MODULE=on
RUN go mod vendor
RUN go generate ./... && \
    CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o super-graph && \
    echo "Compressing binary, will take a bit of time..." && \
RUN make build
RUN echo "Compressing binary, will take a bit of time..." && \
    upx --ultra-brute -qq super-graph && \
    upx -t super-graph

@@ -39,10 +41,15 @@ RUN mkdir -p /config
COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=go-build /app/config/* /config/
COPY --from=go-build /app/super-graph .
COPY --from=go-build /app/scripts/start.sh .
COPY --from=go-build /usr/local/bin/sops .

RUN chmod +x /super-graph
RUN chmod +x /start.sh

USER nobody

EXPOSE 8080
ENV GO_ENV production

CMD ./super-graph serv
ENTRYPOINT ["./start.sh"]
CMD ["./super-graph", "serv"]
189  LICENSE
@@ -1,21 +1,176 @@
The MIT License (MIT)
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

Copyright (c) 2019-present Vikram Rangnekar. twitter.com/dosco
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
1. Definitions.

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS
109  Makefile  Normal file
@@ -0,0 +1,109 @@
BUILD         ?= $(shell git rev-parse --short HEAD)
BUILD_DATE    ?= $(shell git log -1 --format=%ci)
BUILD_BRANCH  ?= $(shell git rev-parse --abbrev-ref HEAD)
BUILD_VERSION ?= $(shell git describe --always --tags)

GOPATH ?= $(shell go env GOPATH)

ifndef GOPATH
override GOPATH = $(HOME)/go
endif

export GO111MODULE := on

# Build-time Go variables
version        = github.com/dosco/super-graph/serv.version
gitBranch      = github.com/dosco/super-graph/serv.gitBranch
lastCommitSHA  = github.com/dosco/super-graph/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime

BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'

.PHONY: all build gen clean test run lint changelog release version help $(PLATFORMS)

test:
	@go test -v ./...

BIN_DIR := $(GOPATH)/bin
GORICE := $(BIN_DIR)/rice
GOLANGCILINT := $(BIN_DIR)/golangci-lint
GITCHGLOG := $(BIN_DIR)/git-chglog
WEB_BUILD_DIR := ./web/build/manifest.json

$(GORICE):
	@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice

$(WEB_BUILD_DIR):
	@echo "First install Yarn and create a build of the web UI found under ./web"
	@echo "Command: cd web && yarn && yarn build"
	@exit 1

$(GITCHGLOG):
	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog

changelog: $(GITCHGLOG)
	@git-chglog $(ARGS)

$(GOLANGCILINT):
	@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.21.0

lint: $(GOLANGCILINT)
	@golangci-lint run ./... --skip-dirs-use-default

BINARY := super-graph
LDFLAGS := -s -w
PLATFORMS := windows linux darwin
os = $(word 1, $@)

$(PLATFORMS): lint test
	@mkdir -p release
	@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64

release: windows linux darwin

all: lint test $(BINARY)

build: $(BINARY)

gen: $(GORICE) $(WEB_BUILD_DIR)
	@go generate ./...

$(BINARY): clean
	@go build $(BUILD_FLAGS) -o $(BINARY)

clean:
	@rm -f $(BINARY)

run: clean
	@go run $(BUILD_FLAGS) main.go $(ARGS)

install: gen
	@echo $(GOPATH)
	@echo "Commit Hash: `git rev-parse HEAD`"
	@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
	@go install $(BUILD_FLAGS)
	@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`

uninstall: clean
	@go clean -i -x

version:
	@echo Super Graph ${BUILD_VERSION}
	@echo Build: ${BUILD}
	@echo Build date: ${BUILD_DATE}
	@echo Branch: ${BUILD_BRANCH}
	@echo Go version: $(shell go version)

help:
	@echo
	@echo Build commands:
	@echo " make build         - Build supergraph binary"
	@echo " make install       - Install supergraph binary"
	@echo " make uninstall     - Uninstall supergraph binary"
	@echo " make [platform]    - Build for platform [linux|darwin|windows]"
	@echo " make release       - Build all platforms"
	@echo " make run           - Run supergraph (eg. make run ARGS=\"help\")"
	@echo " make test          - Run all tests"
	@echo " make changelog     - Generate changelog (eg. make changelog ARGS=\"help\")"
	@echo " make help          - This help"
	@echo
13  NOTICE  Normal file
@@ -0,0 +1,13 @@
Copyright 2019 Vikram Rangnekar

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
53  README.md
@@ -1,16 +1,25 @@
<a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a>
<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->

# Super Graph - Build web products faster. Instant GraphQL APIs for your apps
<img src="docs/.vuepress/public/super-graph.png" width="250" />



### Build web products faster. Secure high performance GraphQL




[](https://discord.gg/6pSWCTZ)

Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries.

## What is Super Graph

Super Graph is designed to 100x your developer productivity. It will instantly, without you writing any code, provide a high performance and secure GraphQL API for your Postgres DB. GraphQL queries are translated into a single fast SQL query. No more writing API code: as you develop
your web frontend, just make the query you need and Super Graph will do the rest.

Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.



## The story of Super Graph

After working on several products through my career I found that we spend way too much time on building API backends. Most APIs also require constant updating, and this costs real time and money.

@@ -25,20 +34,31 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun

## Features

- Works with Rails database schemas
- Automatically learns schemas and relationships
- Belongs-To, One-To-Many and Many-To-Many table relationships
- Full text search and Aggregations
- Rails Auth supported (Redis, Memcache, Cookie)
- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute based access control
- Full text search and aggregations
- JWT tokens supported (Auth0, etc)
- Join with remote REST APIs
- Highly optimized and fast Postgres SQL queries
- Support GraphQL queries and mutations
- Configure with a simple config file
- Join database queries with remote REST APIs
- Also works with existing Ruby-On-Rails apps
- Rails authentication supported (Redis, Memcache, Cookie)
- A simple config file
- High performance Go codebase
- Tiny docker image and low memory requirements
- Fuzz tested for security
- Database migrations tool
- Write database seeding scripts in Javascript
- Database seeding tool
- Works with Postgres and YugabyteDB

## Get started

```
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install

super-graph new <app_name>
```

## Documentation

@@ -46,13 +66,16 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun

## Contact me

I'm happy to help you deploy Super Graph so feel free to reach out over
Twitter or Discord.

[twitter/dosco](https://twitter.com/dosco)

[chat/super-graph](https://discord.gg/6pSWCTZ)

## License

[MIT](http://opensource.org/licenses/MIT)
[Apache Public License 2.0](https://opensource.org/licenses/Apache-2.0)

Copyright (c) 2019-present Vikram Rangnekar
361  allow/allow.go  Normal file
@@ -0,0 +1,361 @@
package allow

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"sort"
	"strings"
)

// Entry types tracked while parsing the allow list.
const (
	AL_QUERY int = iota + 1
	AL_VARS
)

// Item is a single allowed query or mutation, along with its
// variables block and any leading comment.
type Item struct {
	Name    string
	key     string
	Query   string
	Vars    json.RawMessage
	Comment string
}

// List manages the allow.list file on disk.
type List struct {
	filepath string
	saveChan chan Item
}

type Config struct {
	CreateIfNotExists bool
	Persist           bool
}

// New locates the allow.list file (checking cpath, then ./allow.list,
// then ./config/allow.list) and, when Persist is set, starts the
// goroutine that writes queued items back to disk.
func New(cpath string, conf Config) (*List, error) {
	al := List{}

	if len(cpath) != 0 {
		fp := path.Join(cpath, "allow.list")

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		fp := "./allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		fp := "./config/allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if len(al.filepath) == 0 {
		if !conf.CreateIfNotExists {
			return nil, errors.New("allow.list not found")
		}

		if len(cpath) == 0 {
			al.filepath = "./config/allow.list"
		} else {
			al.filepath = path.Join(cpath, "allow.list")
		}
	}

	var err error

	if conf.Persist {
		al.saveChan = make(chan Item)

		go func() {
			for v := range al.saveChan {
				if err = al.save(v); err != nil {
					break
				}
			}
		}()
	}

	if err != nil {
		return nil, err
	}

	return &al, nil
}

// IsPersist reports whether saving to disk is enabled.
func (al *List) IsPersist() bool {
	return al.saveChan != nil
}

// Set queues a query (prefixing anonymous '{...}' bodies with "query")
// for the background save goroutine.
func (al *List) Set(vars []byte, query, comment string) error {
	if al.saveChan == nil {
		return errors.New("allow.list is read-only")
	}

	if len(query) == 0 {
		return errors.New("empty query")
	}

	var q string

	for i := 0; i < len(query); i++ {
		c := query[i]
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
			q = query
			break

		} else if c == '{' {
			q = "query " + query
			break
		}
	}

	al.saveChan <- Item{
		Comment: comment,
		Query:   q,
		Vars:    vars,
	}

	return nil
}

// Load parses the allow.list file into Items. A small scanner tracks
// '#' comments, "variables" blocks and query bodies by brace depth (c),
// de-duplicating items by lower-cased query name.
func (al *List) Load() ([]Item, error) {
	var list []Item

	b, err := ioutil.ReadFile(al.filepath)
	if err != nil {
		return list, err
	}

	if len(b) == 0 {
		return list, nil
	}

	var comment bytes.Buffer
	var varBytes []byte

	itemMap := make(map[string]struct{})

	s, e, c := 0, 0, 0
	ty := 0

	for {
		fq := false

		if c == 0 && b[e] == '#' {
			s = e
			for e < len(b) && b[e] != '\n' {
				e++
			}
			if (e - s) > 2 {
				comment.Write(b[(s + 1):(e + 1)])
			}
		}

		if e >= len(b) {
			break
		}

		if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
			if c == 0 {
				s = e
			}
			ty = AL_QUERY
		} else if matchPrefix(b, e, "variables") {
			if c == 0 {
				s = e + len("variables") + 1
			}
			ty = AL_VARS
		} else if b[e] == '{' {
			c++

		} else if b[e] == '}' {
			c--

			if c == 0 {
				if ty == AL_QUERY {
					fq = true
				} else if ty == AL_VARS {
					varBytes = b[s:(e + 1)]
				}
				ty = 0
			}
		}

		if fq {
			query := string(b[s:(e + 1)])
			name := QueryName(query)
			key := strings.ToLower(name)

			if _, ok := itemMap[key]; !ok {
				v := Item{
					Name:    name,
					key:     key,
					Query:   query,
					Vars:    varBytes,
					Comment: comment.String(),
				}
				list = append(list, v)
				comment.Reset()
			}

			varBytes = nil

		}

		e++
		if e >= len(b) {
			break
		}
	}

	return list, nil
}

// save rewrites the whole allow.list: it merges the new item into the
// loaded list (preserving an existing comment), sorts by key and
// writes each item's comment, variables and query back out.
func (al *List) save(item Item) error {
	item.Name = QueryName(item.Query)
	item.key = strings.ToLower(item.Name)

	if len(item.Name) == 0 {
		return nil
	}

	list, err := al.Load()
	if err != nil {
		return err
	}

	index := -1

	for i, v := range list {
		if strings.EqualFold(v.Name, item.Name) {
			index = i
			break
		}
	}

	if index != -1 {
		if len(list[index].Comment) != 0 {
			item.Comment = list[index].Comment
		}
		list[index] = item
	} else {
		list = append(list, item)
	}

	f, err := os.Create(al.filepath)
	if err != nil {
		return err
	}

	defer f.Close()

	sort.Slice(list, func(i, j int) bool {
		return strings.Compare(list[i].key, list[j].key) == -1
	})

	for _, v := range list {
		cmtLines := strings.Split(v.Comment, "\n")

		i := 0
		for _, c := range cmtLines {
			if c = strings.TrimSpace(c); len(c) == 0 {
				continue
			}

			_, err := f.WriteString(fmt.Sprintf("# %s\n", c))
			if err != nil {
				return err
			}
			i++
		}

		if i != 0 {
			if _, err := f.WriteString("\n"); err != nil {
				return err
			}
		} else {
			if _, err := f.WriteString(fmt.Sprintf("# Query named %s\n\n", v.Name)); err != nil {
				return err
			}
		}

		if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
			vj, err := json.MarshalIndent(v.Vars, "", " ")
			if err != nil {
				return fmt.Errorf("failed to marshal vars: %v", err)
			}

			_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
			if err != nil {
				return err
			}
		}

		if v.Query[0] == '{' {
			_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v.Query))
		} else {
			_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
		}

		if err != nil {
			return err
		}
	}

	return nil
}

// matchPrefix reports whether string s occurs in b starting at offset i.
func matchPrefix(b []byte, i int, s string) bool {
	if (len(b) - i) < len(s) {
		return false
	}
	for n := 0; n < len(s); n++ {
		if b[(i+n)] != s[n] {
			return false
		}
	}
	return true
}

// QueryName extracts the operation name from a "query <name>" or
// "mutation <name>" string; it returns "" for anonymous operations.
func QueryName(b string) string {
	state, s := 0, 0

	for i := 0; i < len(b); i++ {
		switch {
		case state == 2 && !isValidNameChar(b[i]):
			return b[s:i]
		case state == 1 && b[i] == '{':
			return ""
		case state == 1 && isValidNameChar(b[i]):
			s = i
			state = 2
		case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
			state = 1
		}
	}

	return ""
}

func isValidNameChar(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_'
}
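For orientation, here is a minimal sketch of driving the package above, using only the `New`, `Set` and `Load` signatures shown in this diff. The config path and query text are hypothetical, and note that persistence happens asynchronously on the save goroutine.

```go
package main

import (
	"fmt"
	"log"

	"github.com/dosco/super-graph/allow"
)

func main() {
	// Find ./config/allow.list (or decide where to create it) and enable
	// the background goroutine that rewrites the file on every Set.
	list, err := allow.New("./config", allow.Config{
		CreateIfNotExists: true,
		Persist:           true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Queue a named query for saving; the write happens asynchronously.
	if err := list.Set(nil, `query getUsers { users { id } }`, "all users"); err != nil {
		log.Fatal(err)
	}

	// Parse the file back into items, de-duplicated by query name.
	items, err := list.Load()
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range items {
		fmt.Println(it.Name)
	}
}
```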
84  allow/allow_test.go  Normal file
@@ -0,0 +1,84 @@
package allow

import (
	"testing"
)

func TestGQLName1(t *testing.T) {
	var q = `
	query {
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) { id name } }`

	name := QueryName(q)

	if len(name) != 0 {
		t.Fatal("Name should be empty, not ", name)
	}
}

func TestGQLName2(t *testing.T) {
	var q = `
	query hakuna_matata

	{
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) {
			id
			name
		}
	}`

	name := QueryName(q)

	if name != "hakuna_matata" {
		t.Fatal("Name should be 'hakuna_matata', not ", name)
	}
}

func TestGQLName3(t *testing.T) {
	var q = `
	mutation means{ users { id } }`

	// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `

	name := QueryName(q)

	if name != "means" {
		t.Fatal("Name should be 'means', not ", name)
	}
}

func TestGQLName4(t *testing.T) {
	var q = `
	query no_worries
	users {
		id
	}
	}`

	name := QueryName(q)

	if name != "no_worries" {
		t.Fatal("Name should be 'no_worries', not ", name)
	}
}

func TestGQLName5(t *testing.T) {
	var q = `
	{
		users {
			id
		}
	}`

	name := QueryName(q)

	if len(name) != 0 {
		t.Fatal("Name should be empty, not ", name)
	}
}
15
allow/fuzz_test.go
Normal file
15
allow/fuzz_test.go
Normal file
@ -0,0 +1,15 @@
package allow

import "testing"

func TestFuzzCrashers(t *testing.T) {
	var crashers = []string{
		"query",
		"q",
		"que",
	}

	for _, f := range crashers {
		_ = QueryName(f)
	}
}
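The crasher inputs above suggest the package is exercised with go-fuzz; a harness along these lines (hypothetical, not part of this diff) would feed random inputs into QueryName:

```go
// +build gofuzz

package allow

// Fuzz is a hypothetical go-fuzz entry point; the inputs listed in
// TestFuzzCrashers look like cases such a harness once found.
func Fuzz(data []byte) int {
	QueryName(string(data))
	return 0
}
```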
@@ -1,5 +1,27 @@
|
||||
# http://localhost:8080/
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Protect Ya Neck",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Enter the Wu-Tang",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "Wu-Tang",
|
||||
@@ -16,16 +38,16 @@ mutation {
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"product_id": 5
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: $product_id, delete: true) {
|
||||
query {
|
||||
users {
|
||||
id
|
||||
name
|
||||
email
|
||||
picture: avatar
|
||||
products(limit: 2, where: {price: {gt: 10}}) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,73 +73,37 @@ mutation {
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
products {
|
||||
id
|
||||
name
|
||||
user {
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Gumbo1",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Gumbo2",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
"data": {
|
||||
"product_id": 5
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: $product_id, delete: true) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
products {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "Helloo",
|
||||
"description": "World \u003c\u003e"
|
||||
},
|
||||
"user": 123
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: 5, update: $update) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Gumbo1",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Gumbo2",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
query {
|
||||
product {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
query {
|
||||
me {
|
||||
id
|
||||
email
|
||||
full_name
|
||||
price
|
||||
users {
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,15 +122,474 @@ mutation {
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
users {
|
||||
variables {
|
||||
"update": {
|
||||
"name": "Helloo",
|
||||
"description": "World \u003c\u003e"
|
||||
},
|
||||
"user": 123
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(id: 5, update: $update) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "WOOO",
|
||||
"price": 50.5
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
query getProducts {
|
||||
products {
|
||||
id
|
||||
name
|
||||
price
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
deals {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"beer": "smoke"
|
||||
}
|
||||
|
||||
query beerSearch {
|
||||
products(search: $beer) {
|
||||
id
|
||||
name
|
||||
search_rank
|
||||
search_headline_description
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "goo1@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
picture: avatar
|
||||
products(limit: 2, where: {price: {gt: 10}}) {
|
||||
product {
|
||||
id
|
||||
name
|
||||
description
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "goo12@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": [
|
||||
{
|
||||
"name": "Banana 1",
|
||||
"price": 1.1,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Banana 2",
|
||||
"price": 2.2,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
products {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Banana 3",
|
||||
"price": 1.1,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "a2@a.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
price
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name",
|
||||
"description": "my_desc"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(id: 15, update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name",
|
||||
"description": "my_desc"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"update": {
|
||||
"name": "my_name 2",
|
||||
"description": "my_desc 2"
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $update, where: {id: {eq: 1}}) {
|
||||
id
|
||||
name
|
||||
description
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"sale_type": "tuutuu",
|
||||
"quantity": 5,
|
||||
"due_date": "now",
|
||||
"customer": {
|
||||
"email": "thedude1@rug.com",
|
||||
"full_name": "The Dude"
|
||||
},
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
purchase(update: $data, id: 5) {
|
||||
sale_type
|
||||
quantity
|
||||
due_date
|
||||
customer {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"where": {
|
||||
"id": 2
|
||||
},
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(update: $data, where: {id: {eq: 8}}) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"where": {
|
||||
"id": 2
|
||||
},
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user(where: {id: {eq: 8}}) {
|
||||
id
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "thedude@rug.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
user {
|
||||
email
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "booboo@demo.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 6) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"email": "booboo@demo.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query {
|
||||
product(id: 6) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thedude123@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"connect": {
|
||||
"id": 7
|
||||
},
|
||||
"disconnect": {
|
||||
"id": 8
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(update: $data, id: 6) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5,
|
||||
"email": "test@test.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"email": "thed44ude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,12 +597,53 @@ query {
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Protect Ya Neck",
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 6
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Coconut",
|
||||
"price": 2.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 3
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": [
|
||||
{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
},
|
||||
{
|
||||
"name": "Enter the Wu-Tang",
|
||||
"name": "Coconut",
|
||||
"price": 2.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
@@ -168,6 +654,102 @@ mutation {
|
||||
products(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5,
|
||||
"email": "test@test.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 2) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
165 config/dev.yml
@@ -5,15 +5,17 @@ web_ui: true
|
||||
# debug, info, warn, error, fatal, panic
|
||||
log_level: "debug"
|
||||
|
||||
# Disable this in development to get a list of
|
||||
# queries used. When enabled super graph
|
||||
# will only allow queries from this list
|
||||
# List saved to ./config/allow.list
|
||||
use_allow_list: false
|
||||
# enable or disable http compression (uses gzip)
|
||||
http_compress: true
|
||||
|
||||
# When production mode is 'true' only queries
|
||||
# from the allow list are permitted.
|
||||
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
|
||||
production: false
|
||||
|
||||
# Throw a 401 on auth failure for queries that need auth
|
||||
# valid values: always, per_query, never
|
||||
auth_fail_block: never
|
||||
auth_fail_block: false
|
||||
|
||||
# Latency tracing for database queries and remote joins
|
||||
# the resulting latency information is returned with the
|
||||
@@ -22,7 +24,7 @@ enable_tracing: true
|
||||
|
||||
# Watch the config folder and reload Super Graph
|
||||
# with the new configs when a change is detected
|
||||
reload_on_config_change: false
|
||||
reload_on_config_change: true
|
||||
|
||||
# File that points to the database seeding script
|
||||
# seed_file: seed.js
|
||||
@@ -30,6 +32,19 @@ reload_on_config_change: false
|
||||
# Path pointing to where the migrations can be found
|
||||
migrations_path: ./config/migrations
|
||||
|
||||
# Secret key for general encryption operations like
|
||||
# encrypting the cursor data
|
||||
secret_key: supercalifajalistics
|
||||
|
||||
# CORS: A list of origins a cross-domain request can be executed from.
|
||||
# If the special * value is present in the list, all origins will be allowed.
|
||||
# An origin may contain a wildcard (*) to replace 0 or more
|
||||
# characters (i.e.: http://*.domain.com).
|
||||
cors_allowed_origins: ["*"]
|
||||
|
||||
# Debug Cross Origin Resource Sharing requests
|
||||
cors_debug: true
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
# SG_DATABASE_PORT
|
||||
@@ -52,8 +67,9 @@ auth:
|
||||
cookie: _app_session
|
||||
|
||||
# Comment this out if you want to disable setting
|
||||
# the user_id via a header. Good for testing
|
||||
header: X-User-ID
|
||||
# the user_id via a header for testing.
|
||||
# Disable in production
|
||||
creds_in_header: true
|
||||
|
||||
rails:
|
||||
# Rails version this is used for reading the
|
||||
@@ -86,51 +102,35 @@ database:
|
||||
port: 5432
|
||||
dbname: app_development
|
||||
user: postgres
|
||||
password: ''
|
||||
password: postgres
|
||||
|
||||
#schema: "public"
|
||||
#pool_size: 10
|
||||
#max_retries: 0
|
||||
#log_level: "debug"
|
||||
|
||||
# Define variables here that you want to use in filters
|
||||
# sub-queries must be wrapped in ()
|
||||
# Set session variable "user.id" to the user id
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 1m
|
||||
|
||||
# Define additional variables here to be used with filters
|
||||
variables:
|
||||
account_id: "(select account_id from users where id = $user_id)"
|
||||
admin_account_id: "5"
|
||||
|
||||
# Define defaults to for the field key and values below
|
||||
defaults:
|
||||
# filter: ["{ user_id: { eq: $user_id } }"]
|
||||
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
- password
|
||||
- encrypted
|
||||
- token
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
- password
|
||||
- encrypted
|
||||
- token
|
||||
|
||||
tables:
|
||||
- name: users
|
||||
# This filter will overwrite defaults.filter
|
||||
# filter: ["{ id: { eq: $user_id } }"]
|
||||
# filter_query: ["{ id: { eq: $user_id } }"]
|
||||
filter_update: ["{ id: { eq: $user_id } }"]
|
||||
filter_delete: ["{ id: { eq: $user_id } }"]
|
||||
|
||||
# - name: products
|
||||
# # Multiple filters are AND'd together
|
||||
# filter: [
|
||||
# "{ price: { gt: 0 } }",
|
||||
# "{ price: { lt: 8 } }"
|
||||
# ]
|
||||
|
||||
- name: customers
|
||||
# No filter is used for this field, not
# even defaults.filter
|
||||
filter: none
|
||||
|
||||
remotes:
|
||||
- name: payments
|
||||
id: stripe_id
|
||||
@@ -149,7 +149,78 @@ tables:
|
||||
# real db table backing them
|
||||
name: me
|
||||
table: users
|
||||
filter: ["{ id: { eq: $user_id } }"]
|
||||
|
||||
# - name: posts
|
||||
# filter: ["{ account_id: { _eq: $account_id } }"]
|
||||
- name: deals
|
||||
table: products
|
||||
|
||||
- name: users
|
||||
columns:
|
||||
- name: email
|
||||
related_to: products.name
|
||||
|
||||
|
||||
roles_query: "SELECT * FROM users WHERE id = $user_id"
|
||||
|
||||
roles:
|
||||
- name: anon
|
||||
tables:
|
||||
- name: products
|
||||
query:
|
||||
limit: 10
|
||||
columns: ["id", "name", "description" ]
|
||||
aggregation: false
|
||||
|
||||
insert:
|
||||
block: false
|
||||
|
||||
update:
|
||||
block: false
|
||||
|
||||
delete:
|
||||
block: false
|
||||
|
||||
- name: deals
|
||||
query:
|
||||
limit: 3
|
||||
aggregation: false
|
||||
|
||||
- name: purchases
|
||||
query:
|
||||
limit: 3
|
||||
aggregation: false
|
||||
|
||||
- name: user
|
||||
tables:
|
||||
- name: users
|
||||
query:
|
||||
filters: ["{ id: { _eq: $user_id } }"]
|
||||
|
||||
- name: products
|
||||
query:
|
||||
limit: 50
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
disable_functions: false
|
||||
|
||||
insert:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
presets:
|
||||
- user_id: "$user_id"
|
||||
- created_at: "now"
|
||||
- updated_at: "now"
|
||||
|
||||
update:
|
||||
filters: ["{ user_id: { eq: $user_id } }"]
|
||||
columns:
|
||||
- id
|
||||
- name
|
||||
presets:
|
||||
- updated_at: "now"
|
||||
|
||||
delete:
|
||||
block: true
|
||||
|
||||
- name: admin
|
||||
match: id = 1000
|
||||
tables:
|
||||
- name: users
|
||||
filters: []
|
||||
|
128 config/prod.yml
@@ -1,3 +1,7 @@
|
||||
# Inherit config from this other config file
|
||||
# so I only need to overwrite some values
|
||||
inherits: dev
|
||||
|
||||
app_name: "Super Graph Production"
|
||||
host_port: 0.0.0.0:8080
|
||||
web_ui: false
|
||||
@@ -5,15 +9,17 @@ web_ui: false
|
||||
# debug, info, warn, error, fatal, panic, disable
|
||||
log_level: "info"
|
||||
|
||||
# Disable this in development to get a list of
|
||||
# queries used. When enabled super graph
|
||||
# will only allow queries from this list
|
||||
# List saved to ./config/allow.list
|
||||
use_allow_list: true
|
||||
# enable or disable http compression (uses gzip)
|
||||
http_compress: true
|
||||
|
||||
# When production mode is 'true' only queries
|
||||
# from the allow list are permitted.
|
||||
# When it's 'false' all queries are saved to
# the allow list in ./config/allow.list
|
||||
production: true
|
||||
|
||||
# Throw a 401 on auth failure for queries that need auth
|
||||
# valid values: always, per_query, never
|
||||
auth_fail_block: always
|
||||
auth_fail_block: true
|
||||
|
||||
# Latency tracing for database queries and remote joins
|
||||
# the resulting latency information is returned with the
|
||||
@@ -26,6 +32,10 @@ enable_tracing: true
|
||||
# Path pointing to where the migrations can be found
|
||||
# migrations_path: migrations
|
||||
|
||||
# Secret key for general encryption operations like
|
||||
# encrypting the cursor data
|
||||
# secret_key: supercalifajalistics
|
||||
|
||||
# Postgres related environment Variables
|
||||
# SG_DATABASE_HOST
|
||||
# SG_DATABASE_PORT
|
||||
@@ -38,110 +48,20 @@ enable_tracing: true
|
||||
# SG_AUTH_RAILS_REDIS_PASSWORD
|
||||
# SG_AUTH_JWT_PUBLIC_KEY_FILE
|
||||
|
||||
# inflections:
|
||||
# person: people
|
||||
# sheep: sheep
|
||||
|
||||
auth:
|
||||
# Can be 'rails' or 'jwt'
|
||||
type: rails
|
||||
cookie: _app_session
|
||||
|
||||
# Comment this out if you want to disable setting
|
||||
# the user_id via a header. Good for testing
|
||||
header: X-User-ID
|
||||
|
||||
rails:
|
||||
# Rails version this is used for reading the
|
||||
# various cookies formats.
|
||||
version: 5.2
|
||||
|
||||
# Found in 'Rails.application.config.secret_key_base'
|
||||
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
|
||||
|
||||
# Remote cookie store. (memcache or redis)
|
||||
# url: redis://127.0.0.1:6379
|
||||
# password: test
|
||||
# max_idle: 80,
|
||||
# max_active: 12000,
|
||||
|
||||
# In most cases you don't need these
|
||||
# salt: "encrypted cookie"
|
||||
# sign_salt: "signed encrypted cookie"
|
||||
# auth_salt: "authenticated encrypted cookie"
|
||||
|
||||
# jwt:
|
||||
# provider: auth0
|
||||
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
|
||||
# public_key_file: /secrets/public_key.pem
|
||||
# public_key_type: ecdsa #rsa
|
||||
|
||||
database:
|
||||
type: postgres
|
||||
host: db
|
||||
port: 5432
|
||||
dbname: {{app_name_slug}}_development
|
||||
dbname: app_production
|
||||
user: postgres
|
||||
password: ''
|
||||
password: postgres
|
||||
#pool_size: 10
|
||||
#max_retries: 0
|
||||
#log_level: "debug"
|
||||
|
||||
# Define variables here that you want to use in filters
|
||||
# sub-queries must be wrapped in ()
|
||||
variables:
|
||||
account_id: "(select account_id from users where id = $user_id)"
|
||||
# Set session variable "user.id" to the user id
|
||||
# Enable this if you need the user id in triggers, etc
|
||||
set_user_id: false
|
||||
|
||||
# Define defaults to for the field key and values below
|
||||
defaults:
|
||||
filter: ["{ user_id: { eq: $user_id } }"]
|
||||
|
||||
# Field and table names that you wish to block
|
||||
blocklist:
|
||||
- ar_internal_metadata
|
||||
- schema_migrations
|
||||
- secret
|
||||
- password
|
||||
- encrypted
|
||||
- token
|
||||
|
||||
tables:
|
||||
- name: users
|
||||
# This filter will overwrite defaults.filter
|
||||
# filter: ["{ id: { eq: $user_id } }"]
|
||||
# filter_query: ["{ id: { eq: $user_id } }"]
|
||||
filter_update: ["{ id: { eq: $user_id } }"]
|
||||
filter_delete: ["{ id: { eq: $user_id } }"]
|
||||
|
||||
- name: products
|
||||
# Multiple filters are AND'd together
|
||||
filter: [
|
||||
"{ price: { gt: 0 } }",
|
||||
"{ price: { lt: 8 } }"
|
||||
]
|
||||
|
||||
- name: customers
|
||||
# No filter is used for this field, not
# even defaults.filter
|
||||
filter: none
|
||||
|
||||
# remotes:
|
||||
# - name: payments
|
||||
# id: stripe_id
|
||||
# url: http://rails_app:3000/stripe/$id
|
||||
# path: data
|
||||
# # pass_headers:
|
||||
# # - cookie
|
||||
# # - host
|
||||
# set_headers:
|
||||
# - name: Authorization
|
||||
# value: Bearer <stripe_api_key>
|
||||
|
||||
- # You can create new fields that have a
|
||||
# real db table backing them
|
||||
name: me
|
||||
table: users
|
||||
filter: ["{ id: { eq: $user_id } }"]
|
||||
|
||||
# - name: posts
|
||||
# filter: ["{ account_id: { _eq: $account_id } }"]
|
||||
# database ping timeout is used for db health checking
|
||||
ping_timeout: 5m
|
@@ -11,7 +11,7 @@ for (i = 0; i < user_count; i++) {
|
||||
var pwd = fake.password()
|
||||
var data = {
|
||||
full_name: fake.name(),
|
||||
avatar: fake.image_url(),
|
||||
avatar: fake.avatar_url(200),
|
||||
phone: fake.phone(),
|
||||
email: fake.email(),
|
||||
password: pwd,
|
||||
@@ -46,10 +46,10 @@ for (i = 0; i < product_count; i++) {
|
||||
var data = {
|
||||
name: fake.beer_name(),
|
||||
description: desc,
|
||||
price: fake.price(),
|
||||
user_id: user.id,
|
||||
created_at: "now",
|
||||
updated_at: "now"
|
||||
price: fake.price()
|
||||
//user_id: user.id,
|
||||
//created_at: "now",
|
||||
//updated_at: "now"
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
@@ -57,7 +57,9 @@ for (i = 0; i < product_count; i++) {
|
||||
product(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data })
|
||||
}", { data: data }, {
|
||||
user_id: 5
|
||||
})
|
||||
products.push(res.product)
|
||||
}
|
||||
|
||||
|
80 crypto/encrypt.go Normal file
@@ -0,0 +1,80 @@
// cryptopasta - basic cryptography examples
//
// Written in 2015 by George Tankersley <george.tankersley@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.

// Provides symmetric authenticated encryption using 256-bit AES-GCM with a random nonce.
package crypto

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"errors"
	"io"
)

// NewEncryptionKey generates a random 256-bit key for Encrypt() and
// Decrypt(). It panics if the source of randomness fails.
func NewEncryptionKey() [32]byte {
	key := [32]byte{}
	_, err := io.ReadFull(rand.Reader, key[:])
	if err != nil {
		panic(err)
	}
	return key
}

// Encrypt encrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Output takes the
// form nonce|ciphertext|tag where '|' indicates concatenation.
func Encrypt(plaintext []byte, key *[32]byte) (ciphertext []byte, err error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce := make([]byte, gcm.NonceSize())
	_, err = io.ReadFull(rand.Reader, nonce)
	if err != nil {
		return nil, err
	}

	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// Decrypt decrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Expects input
// form nonce|ciphertext|tag where '|' indicates concatenation.
func Decrypt(ciphertext []byte, key *[32]byte) (plaintext []byte, err error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	if len(ciphertext) < gcm.NonceSize() {
		return nil, errors.New("malformed ciphertext")
	}

	return gcm.Open(nil,
		ciphertext[:gcm.NonceSize()],
		ciphertext[gcm.NonceSize():],
		nil,
	)
}
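A quick round-trip sketch of this API. The key handling is illustrative only (a real caller would derive the key from configuration, such as the secret_key shown in dev.yml), and the import path is an assumption based on the repository layout:

```go
package main

import (
	"fmt"

	"github.com/dosco/super-graph/crypto" // assumed import path
)

func main() {
	// Generate a fresh random 256-bit key.
	key := crypto.NewEncryptionKey()

	// Seal some data: output is nonce|ciphertext|tag.
	ct, err := crypto.Encrypt([]byte("cursor data"), &key)
	if err != nil {
		panic(err)
	}

	// Open it again with the same key.
	pt, err := crypto.Decrypt(ct, &key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt)) // "cursor data"
}
```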
@@ -1,7 +1,10 @@
|
||||
version: '3.4'
|
||||
services:
|
||||
db:
|
||||
image: postgres
|
||||
image: postgres:12
|
||||
environment:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
ports:
|
||||
- "5432:5432"
|
||||
|
||||
@@ -34,9 +37,9 @@ services:
|
||||
volumes:
|
||||
- .:/app
|
||||
working_dir: /app
|
||||
command: wu -pattern="*.go" go run main.go serv
|
||||
command: wtc
|
||||
depends_on:
|
||||
- db
|
||||
- rails_app
|
||||
# - rails_app
|
||||
|
||||
# - redis
|
||||
|
19 docs/.vuepress/components/Card.vue Normal file
@@ -0,0 +1,19 @@
<template>
  <div class="shadow bg-white p-4 flex items-start" :class="className">
    <slot name="image"></slot>
    <div class="pl-4">
      <h2 class="p-0">
        <slot name="title"></slot>
      </h2>
      <p>
        <slot name="body"></slot>
      </p>
    </div>
  </div>
</template>

<script>
export default {
  props: ["className"]
}
</script>
@@ -8,21 +8,24 @@
|
||||
<div class="bg-bottom bg-no-repeat bg-cover">
|
||||
<div class="text-center md:text-left pt-24">
|
||||
<h1 v-if="data.heroText !== null" class="text-5xl font-bold text-black pb-0 uppercase">
|
||||
{{ data.heroText || $title || 'Hello' }}
|
||||
<img src="/super-graph.png" width="250" />
|
||||
</h1>
|
||||
|
||||
<p class="text-2xl text-gray-700 leading-tight pb-0">
|
||||
{{ data.tagline || $description || 'Welcome to your VuePress site' }}
|
||||
</p>
|
||||
|
||||
<p class="text-lg text-gray-600 leading-tight">
|
||||
{{ data.longTagline }}
|
||||
<p class="text-4xl text-gray-800 leading-tight mt-1">
|
||||
Build web products faster. Secure high performance GraphQL
|
||||
</p>
|
||||
|
||||
<NavLink
|
||||
class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
|
||||
:item="actionLink"
|
||||
/>
|
||||
|
||||
<a
|
||||
class="px-4 py-3 my-8 border-2 border-gray-500 text-gray-600 font-bold rounded"
|
||||
href="https://github.com/dosco/super-graph"
|
||||
target="_blank"
|
||||
>Github</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -51,17 +54,26 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="bg-gray-100 mt-10">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
|
||||
<div class="pb-8 hidden md:block ">
|
||||
<img src="arch-basic.svg">
|
||||
</div>
|
||||
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
What is {{ data.heroText }}?
|
||||
</h1>
|
||||
<div class="text-2xl md:text-3xl">
|
||||
Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secure GraphQL API. It comes with tools to create a new app and manage its database. You get it all: a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="flex flex-wrap">
|
||||
<div class="md:w-2/4">
|
||||
<img src="/graphql.png">
|
||||
@@ -112,7 +124,7 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="overflow-hidden bg-indigo-900" style="height: 730px">
|
||||
<div class="overflow-hidden bg-indigo-900">
|
||||
<div class="container mx-auto py-20">
|
||||
<img src="/super-graph-web-ui.png">
|
||||
</div>
|
||||
@@ -125,7 +137,7 @@
|
||||
</h1>
|
||||
<div class="text-2xl md:text-3xl">
|
||||
<small class="text-sm">Download the Docker compose config for the demo</small>
|
||||
<pre>‣ curl -L -o demo.yml https://bit.ly/2mq05lW</pre>
|
||||
<pre>‣ curl -L -o demo.yml https://bit.ly/2FZS0uw</pre>
|
||||
|
||||
<small class="text-sm">Setup the demo database</small>
|
||||
<pre>‣ docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed</pre>
|
||||
@@ -143,10 +155,73 @@
|
||||
</div>
|
||||
|
||||
<div class="border-t py-10">
|
||||
<div class="container mx-auto">
|
||||
<style>.embed-container { position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; max-width: 100%; } .embed-container iframe, .embed-container object, .embed-container embed { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }</style>
|
||||
<div class="embed-container shadow">
|
||||
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen></iframe>
|
||||
<div class="block md:hidden w-100">
|
||||
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen style="width: 100%; height: 250px;">
|
||||
</iframe>
|
||||
</div>
|
||||
|
||||
<div class="container mx-auto flex flex-col md:flex-row items-center">
|
||||
<div class="w-100 md:w-1/2 p-8">
|
||||
<h1 class="text-2xl font-bold">GraphQL the future of APIs</h1>
|
||||
<p class="text-xl text-gray-600">Keeping a tight and fast development loop helps you iterate quickly. Leveraging technology like Super Graph focuses your team on building the core product and not reinventing wheels. GraphQL eliminate the dependency on the backend engineering and keeps the things moving fast</p>
|
||||
</div>
|
||||
|
||||
<div class="hidden md:block md:w-1/2">
|
||||
<style>.embed-container { position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; max-width: 100%; } .embed-container iframe, .embed-container object, .embed-container embed { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }</style>
|
||||
<div class="embed-container shadow">
|
||||
<iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen >
|
||||
</iframe>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-gray-200 mt-10">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
Build Secure Apps
|
||||
</h1>
|
||||
<div class="flex flex-col text-2xl md:text-3xl">
|
||||
<card className="mb-1 p-8">
|
||||
<template #image><font-awesome-icon icon="portrait" class="text-red-500" /></template>
|
||||
<template #title>Role Based Access Control</template>
|
||||
<template #body>Dynamically assign roles like admin, manager or anon to specific users. Generate role specific queries at runtime. For example admins can get all users while others can only fetch their own user.</template>
|
||||
</card>
|
||||
<card className="mb-1 p-8">
|
||||
<template #image><font-awesome-icon icon="shield-alt" class="text-blue-500" /></template>
|
||||
<template #title>Prepared Statements</template>
|
||||
<template #body>An additional layer of protection from a variety of security issues like SQL injection. In production mode all queries are precompiled into prepared statements so only those can be executed. This also significantly speeds up all queries.</template>
|
||||
</card>
|
||||
<card className="p-8">
|
||||
<template #image><font-awesome-icon icon="lock" class="text-green-500"/></template>
|
||||
<template #title>Fuzz Tested Code</template>
|
||||
<template #body>Fuzzing is done by complex software that generates massive amounts of random input to detect if code is free of security bugs. Google uses fuzzing to protect everything from their cloud infrastructure to the Chrome browser.</template>
|
||||
</card>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="">
|
||||
<div class="container mx-auto px-10 md:px-0 py-32">
|
||||
<h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
|
||||
More Features
|
||||
</h1>
|
||||
<div class="flex flex-col md:flex-row text-2xl md:text-3xl">
|
||||
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3 items-center">
|
||||
<template #image><img src="/arch-remote-join.svg" class="h-64"></template>
|
||||
<template #title>Remote Joins</template>
|
||||
<template #body>A powerful feature that allows you to query your database and remote REST APIs at the same time. For example fetch a user from the DB, their tweets from Twitter and their payments from Stripe with a single GraphQL query.</template>
|
||||
</card>
|
||||
<card className="mr-0 md:mr-1 mb-1 flex-col w-100 md:w-1/3">
|
||||
<template #image><img src="/arch-search.svg" class="h-64"></template>
|
||||
<template #title>Full Text Search</template>
|
||||
<template #body>Postgres has excellent full-text search built-in. You don't need another expensive service. Super Graph makes it super easy to use with keyword ranking and highlighting also supported.</template>
|
||||
</card>
|
||||
<card className="mb-1 flex-col w-100 md:w-1/3">
|
||||
<template #image><img src="/arch-bulk.svg" class="h-64"></template>
|
||||
<template #title>Bulk Inserts</template>
|
||||
<template #body>Efficiently insert, update and delete multiple items with a single query. Upserts are also supported.</template>
|
||||
</card>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -164,9 +239,17 @@
|
||||
<script>
|
||||
import NavLink from '@theme/components/NavLink.vue'
|
||||
import Navbar from '@theme/components/Navbar.vue'
|
||||
import Card from './Card.vue'
|
||||
|
||||
|
||||
import { library } from '@fortawesome/fontawesome-svg-core'
|
||||
import { faPortrait, faShieldAlt, faLock } from '@fortawesome/free-solid-svg-icons'
|
||||
import { FontAwesomeIcon } from '@fortawesome/vue-fontawesome'
|
||||
|
||||
library.add(faPortrait, faShieldAlt, faLock)
|
||||
|
||||
export default {
|
||||
components: { NavLink, Navbar },
|
||||
components: { NavLink, Navbar, FontAwesomeIcon, Card },
|
||||
|
||||
computed: {
|
||||
data () {
|
||||
|
@@ -1,20 +1,41 @@
|
||||
let ogprefix = 'og: http://ogp.me/ns#'
|
||||
let title = 'Super Graph'
|
||||
let description = 'An instant GraphQL API for your app. No code needed.'
|
||||
let color = '#f42525'
|
||||
|
||||
module.exports = {
|
||||
title: 'Super Graph',
|
||||
description: 'Get an instant GraphQL API for your Rails apps.',
|
||||
title: title,
|
||||
description: description,
|
||||
|
||||
themeConfig: {
|
||||
logo: '/hologram.svg',
|
||||
nav: [
|
||||
{ text: 'Docs', link: '/guide' },
|
||||
{ text: 'Deploy', link: '/deploy' },
|
||||
{ text: 'Internals', link: '/internals' },
|
||||
{ text: 'Github', link: 'https://github.com/dosco/super-graph' },
|
||||
{ text: 'Docker', link: 'https://hub.docker.com/r/dosco/super-graph/builds' },
|
||||
{ text: 'Join Chat', link: 'https://discord.gg/NKdXBc' },
|
||||
|
||||
],
|
||||
serviceWorker: {
|
||||
updatePopup: true
|
||||
},
|
||||
|
||||
head: [
|
||||
//['link', { rel: 'icon', href: `/assets/favicon.ico` }],
|
||||
['meta', { prefix: ogprefix, property: 'og:title', content: title }],
|
||||
['meta', { prefix: ogprefix, property: 'twitter:title', content: title }],
|
||||
['meta', { prefix: ogprefix, property: 'og:type', content: 'website' }],
|
||||
['meta', { prefix: ogprefix, property: 'og:url', content: 'https://supergraph.dev' }],
|
||||
['meta', { prefix: ogprefix, property: 'og:description', content: description }],
|
||||
//['meta', { prefix: ogprefix, property: 'og:image', content: 'https://wireupyourfrontend.com/assets/logo.png' }],
|
||||
// ['meta', { name: 'apple-mobile-web-app-capable', content: 'yes' }],
|
||||
// ['meta', { name: 'apple-mobile-web-app-status-bar-style', content: 'black' }],
|
||||
// ['link', { rel: 'apple-touch-icon', href: `/assets/apple-touch-icon.png` }],
|
||||
// ['link', { rel: 'mask-icon', href: '/assets/safari-pinned-tab.svg', color: color }],
|
||||
// ['meta', { name: 'msapplication-TileImage', content: '/assets/mstile-150x150.png' }],
|
||||
// ['meta', { name: 'msapplication-TileColor', content: color }],
|
||||
],
|
||||
},
|
||||
|
||||
postcss: {
|
||||
|
3 docs/.vuepress/public/arch-basic.svg Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 5.4 KiB
3 docs/.vuepress/public/arch-bulk.svg Normal file
@@ -0,0 +1,3 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="411px" height="240px" viewBox="-0.5 -0.5 411 240"><defs/><g><g transform="translate(1.5,6.5)"><switch><foreignObject style="overflow:visible;" pointer-events="none" width="379" height="232" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;"><div><div style="font-size: 19px"><font color="#3b3b3b">[{ name: "beer1", description: "99 bottles of"},</font></div></div><div style="font-size: 19px"><div style="font-size: 12px"><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer2", description: "98 bottles of"},</font></div><div style="font-size: 19px"><div style="font-size: 12px"><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer3", description: "97 bottles of"},</font></div><div><div><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer4", description: "96 bottles of"},</font></div></div></div><div><div><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer5", description: "95 bottles of"},</font></div></div></div><div><div><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer6", description: "94 bottles of"},</font></div></div></div><div><div><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer7", description: "93 bottles of"},</font></div></div></div><div><div><div style="font-size: 19px"><font color="#3b3b3b"> { name: "beer8", description: "92 bottles of"}]</font></div></div></div><div><font color="#3b3b3b"><br /></font></div></div></div></div><div><font color="#3b3b3b"> </font></div></div><div style="font-size: 19px"><font color="#3b3b3b"> </font></div></div></div></foreignObject><text x="190" y="122" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g></g></svg>
|
After Width: | Height: | Size: 2.3 KiB
3 docs/.vuepress/public/arch-remote-join.svg Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 5.4 KiB
3 docs/.vuepress/public/arch-search.svg Normal file
@@ -0,0 +1,3 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="426px" height="204px" viewBox="-0.5 -0.5 426 204"><defs/><g><g transform="translate(1.5,6.5)"><switch><foreignObject style="overflow:visible;" pointer-events="none" width="411" height="196" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;"><div><div style="font-size: 19px"><font color="#3b3b3b">query {</font></div><div><span style="font-size: 19px"> <font color="#cc0000">products</font><font color="#3b3b3b">(</font></span><span style="font-size: 19px"><font color="#3b3b3b">search: "</font><font color="#00994d">ale</font><font color="#3b3b3b">", where: { price: { gt: 3 }) {</font></span></div><div><span style="font-size: 19px"> <font color="#0066cc"> id</font></span></div><div><span style="font-size: 19px"><font color="#0066cc"> name</font></span></div><div><span style="font-size: 19px"><font color="#0066cc"> search_rank</font></span></div><div><span style="font-size: 19px"><font color="#0066cc"> search_headline_description</font><br /> <font color="#3b3b3b"> }<br />}</font></span></div></div><div style="font-size: 19px"><br /></div></div></div></foreignObject><text x="206" y="104" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g></g></svg>
|
After Width: | Height: | Size: 1.8 KiB
BIN docs/.vuepress/public/super-graph.png Normal file
Binary file not shown.
After Width: | Height: | Size: 14 KiB
@@ -17,14 +17,14 @@ features:
|
||||
details: Easy config file, quick to deploy, No code needed. It just works.
|
||||
- title: High Performance
|
||||
details: Compiles your GraphQL into a fast SQL query in realtime.
|
||||
- title: Built in GO
|
||||
details: Built in Go is a language created at Google to build fast and secure web services.
|
||||
- title: Ruby-on-Rails
|
||||
details: Can read Rails cookies and supports rails database conventions.
|
||||
- title: Serverless
|
||||
details: Instant startup for scale to zero environments like Google Cloud Run, App Engine, AWS Lambda
|
||||
- title: Go Lang
|
||||
details: Go is a language created at Google to build fast and secure web services.
|
||||
- title: Free and Open Source
|
||||
details: Not a VC funded startup. Not even a startup, just good old open source code.
|
||||
|
||||
footer: MIT Licensed | Copyright © 2018-present Vikram Rangnekar
|
||||
footer: Apache Public License 2.0 | Copyright © 2018-present Vikram Rangnekar
|
||||
---
|
||||
|
1196 docs/guide.md
@@ -4,37 +4,47 @@ sidebar: auto
|
||||
|
||||
# Guide to Super Graph
|
||||
|
||||
Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries. Also Designed to integrate with your Rails apps.
|
||||
Super Graph is a service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more spending weeks or months writing backend API code. Just make the query you need and Super Graph will do the rest.
|
||||
|
||||
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, Support for JWT tokens, DB migrations, seeding and a lot more.
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- Works with Rails database schemas
|
||||
- Automatically learns schemas and relationships
|
||||
- Belongs-To, One-To-Many and Many-To-Many table relationships
|
||||
- Full text search and Aggregations
|
||||
- Rails Auth supported (Redis, Memcache, Cookie)
|
||||
- Role and Attribute based access control
|
||||
- Works with existing Ruby-On-Rails apps
|
||||
- Automatically learns database schemas and relationships
|
||||
- Full text search and aggregations
|
||||
- Rails authentication supported (Redis, Memcache, Cookie)
|
||||
- JWT tokens supported (Auth0, etc)
|
||||
- Join with remote REST APIs
|
||||
- Join database with remote REST APIs
|
||||
- Highly optimized and fast Postgres SQL queries
|
||||
- Support GraphQL queries and mutations
|
||||
- Configure with a simple config file
|
||||
- GraphQL queries and mutations
|
||||
- A simple config file
|
||||
- High performance GO codebase
|
||||
- Tiny docker image and low memory requirements
|
||||
- Fuzz tested for security
|
||||
- Database migrations tool
|
||||
- Write database seeding scripts in Javascript
|
||||
- Database seeding tool
|
||||
|
||||
|
||||
## Try the demo app
|
||||
|
||||
```bash
|
||||
# download the Docker compose config for the demo
|
||||
curl -L -o demo.yml https://bit.ly/2mq05lW
|
||||
# clone the repository
|
||||
git clone https://github.com/dosco/super-graph
|
||||
|
||||
# run db in background
|
||||
docker-compose up -d db
|
||||
|
||||
# see logs and wait until DB is really UP
|
||||
docker-compose logs db
|
||||
|
||||
# setup the demo rails app & database and run it
|
||||
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
|
||||
docker-compose run rails_app rake db:create db:migrate db:seed
|
||||
|
||||
# run the demo
|
||||
docker-compose -f demo.yml up
|
||||
docker-compose up
|
||||
|
||||
# signin to the demo app (user1@demo.com / 123456)
|
||||
open http://localhost:3000
|
||||
@@ -43,14 +53,14 @@ open http://localhost:3000
|
||||
open http://localhost:8080
|
||||
```
|
||||
|
||||
::: warning DEMO REQUIREMENTS
|
||||
::: tip DEMO REQUIREMENTS
|
||||
This demo requires `docker`. You can either install it using `brew` or from the
|
||||
docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
|
||||
:::
|
||||
|
||||
#### Trying out GraphQL
|
||||
|
||||
We currently fully support queries and mutations. Support for `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
|
||||
We fully support queries and mutations. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
|
||||
|
||||
#### GQL Query
|
||||
|
||||
@@ -72,32 +82,6 @@ query {
|
||||
}
|
||||
```
|
||||
|
||||
In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The above GraphQL query returns the JSON result below. It handles all
kinds of complexity without you having to write a line of code.

For example there is a where clause with a greater-than (`gt`) filter and a limit clause on a child field. And the `avatar` field is renamed to `picture`. The `password` field is blocked and not returned. Finally the relationship between the `users` table and the `products` table is auto-discovered and used.
|
||||
|
||||
#### JSON Result
|
||||
|
||||
```json
|
||||
@@ -124,19 +108,107 @@ For example there is a while greater than `gt` and a limit clause on a child fie
|
||||
}
|
||||
```
|
||||
|
||||
#### Try with an authenticated user
|
||||
::: tip Testing with a user
|
||||
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies, etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI.
|
||||
:::
|
||||
|
||||
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies, etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI you'll see when you visit the above link. You can also directly run queries from the command line like below.
|
||||
In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.
|
||||
|
||||
#### Querying the GQL endpoint
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"name": "Art of Computer Programming",
|
||||
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
|
||||
"price": 30.5
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
```graphql
|
||||
mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# fetch the response json directly from the endpoint using user id 5
|
||||
curl 'http://localhost:8080/api/v1/graphql' \
|
||||
-H 'content-type: application/json' \
|
||||
-H 'X-User-ID: 5' \
|
||||
--data-binary '{"query":"{ products { name price users { email }}}"}'
|
||||
## Why Super Graph
|
||||
|
||||
Let's take a simple example: say you want to fetch 5 products priced over 12 dollars along with the photos of the products and the users that own them. Additionally, also fetch the last 10 of your own purchases along with the name and ID of the product you purchased. This is a common type of query to render a view in, say, an ecommerce app. Let's be honest, it's not very exciting to write and maintain. Keep in mind the data needed will only continue to grow and change as your app evolves. Developers might find that most ORMs will not be able to do all of this in a single SQL query and will require n+1 queries to fetch all the data and assemble it into the right JSON response.
|
||||
|
||||
What if I told you Super Graph will fetch all this data with a single SQL query, without you having to write a single line of code? Also, as your app evolves, feel free to evolve the query as you like. In our experience Super Graph saves us hundreds or thousands of man-hours that we can put towards the more exciting parts of our app.
|
||||
|
||||
#### GraphQL Query
|
||||
|
||||
```graphql
|
||||
query {
|
||||
products(limit: 5, where: { price: { gt: 12 } }) {
|
||||
id
|
||||
name
|
||||
description
|
||||
price
|
||||
photos {
|
||||
url
|
||||
}
|
||||
user {
|
||||
id
|
||||
email
|
||||
picture : avatar
|
||||
full_name
|
||||
}
|
||||
}
|
||||
purchases(
|
||||
limit: 10,
|
||||
order_by: { created_at: desc } ,
|
||||
where: { user_id: { eq: $user_id } }
|
||||
) {
|
||||
id
|
||||
created_at
|
||||
product {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### JSON Result
|
||||
|
||||
```json
|
||||
|
||||
"data": {
|
||||
"products": [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Oaked Arrogant Bastard Ale",
|
||||
"description": "Coors lite, European Amber Lager, Perle, 1272 - American Ale II, 38 IBU, 6.4%, 9.7°Blg",
|
||||
"price": 20,
|
||||
"photos: [{
|
||||
"url": "https://www.scienceworld.ca/wp-content/uploads/science-world-beer-flavours.jpg"
|
||||
}],
|
||||
"user": {
|
||||
"id": 1,
|
||||
"email": "user0@demo.com",
|
||||
"picture": "https://robohash.org/sitaliquamquaerat.png?size=300x300&set=set1",
|
||||
"full_name": "Mrs. Wilhemina Hilpert"
|
||||
}
|
||||
},
|
||||
...
|
||||
],
|
||||
"purchases": [
|
||||
{
|
||||
"id": 5,
|
||||
"created_at": "2020-01-24T05:34:39.880599",
|
||||
"product": {
|
||||
"id": 45,
|
||||
"name": "Brooklyn Black",
|
||||
}
|
||||
},
|
||||
...
|
||||
]
|
||||
  }
}
|
||||
```
|
||||
|
||||
## Get Started
|
||||
@@ -146,9 +218,13 @@ Super Graph can generate your initial app for you. The generated app will have c
|
||||
You can then add your database schema to the migrations, maybe create some seed data using the seed script and launch Super Graph. You're now good to go and can start working on your UI frontend in React, Vue or whatever.
|
||||
|
||||
```bash
|
||||
# use the below command to download and install Super Graph. You will need Go 1.13 or above
|
||||
GO111MODULE=on go get -u github.com/dosco/super-graph
|
||||
# Download and install Super Graph. You will need Go 1.13 or above
|
||||
git clone https://github.com/dosco/super-graph && cd super-graph && make install
|
||||
```
|
||||
|
||||
And then create and launch your new app
|
||||
|
||||
```bash
|
||||
# create a new app and change to its directory
|
||||
super-graph new blog; cd blog
|
||||
|
||||
@@ -216,6 +292,12 @@ for (i = 0; i < 10; i++) {
|
||||
}
|
||||
```
|
||||
|
||||
If you want to import a lot of data, using a CSV file is the best and fastest option. The `import_csv` command uses the `COPY FROM` Postgres method to load massive amounts of data into tables. The first line of the CSV file must be the header with column names.
|
||||
|
||||
```javascript
|
||||
var post_count = import_csv("posts", "posts.csv")
|
||||
```
|
||||
|
||||
You can generate fake data for your seeding purposes using the built-in fake data library; the list of supported functions is below. For example `fake.image_url()` will generate a fake image url, `fake.shuffle_strings(['hello', 'world', 'cool'])` will generate a randomly shuffled version of that array of strings, and `fake.rand_string(['hello', 'world', 'cool'])` will return a random string from the array provided.
|
||||
|
||||
```
|
||||
@@ -275,7 +357,7 @@ transmission_gear_type
// Text
word
sentence
paragraph
question
quote

@@ -475,6 +557,21 @@ query {
}
```

Multiple tables can also be fetched using a single GraphQL query. This is very fast since the entire query is converted into a single SQL query which the database can efficiently run.

```graphql
query {
  user {
    full_name
    email
  }
  products {
    name
    description
  }
}
```

### Fetching data

To fetch a specific `product` by its ID you can use the `id` argument. The real name of the id field will be resolved automatically, so this query will work even if your id column is named something like `product_id`.

@@ -497,9 +594,51 @@ query {
}
```

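As a minimal sketch of such a lookup, reusing the demo `products` schema from the examples above (`$product_id` is assumed to be passed in as a variable):

```graphql
query {
  # 'id' here resolves to the table's real primary key column,
  # e.g. product_id in the demo schema
  product(id: $product_id) {
    id
    name
    price
  }
}
```
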
### Sorting

To sort or order results just use the `order_by` argument. This can be combined with `where`, `search`, etc to build complex queries to fit your needs.

```graphql
query {
  products(order_by: { cached_votes_total: desc }) {
    id
    name
  }
}
```

### Filtering

Super Graph supports complex queries where you can add filters, ordering, offsets and limits. For example the below query will list all products where the price is greater than 10 and the id is not 5.

```graphql
query {
  products(where: {
      and: {
        price: { gt: 10 },
        not: { id: { eq: 5 } }
      }
    }) {
    name
    price
  }
}
```

#### Nested where clause targeting related tables

Sometimes you need to query a table based on a condition that applies to a related table. For example, say you need to list all users who belong to an account. The query below will fetch the id and email of all users who belong to the account with id 3.

```graphql
query {
  users(where: {
      accounts: { id: { eq: 3 } }
    }) {
    id
    email
  }
}
```

#### Logical Operators
@@ -586,9 +725,7 @@ query {
}
```

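As a quick sketch of how these operators can combine (field names here are assumed from the demo `products` table; the `or` form is assumed to mirror the `and` form shown under Filtering above):

```graphql
query {
  # return products that are either out of stock or premium priced
  products(where: {
      or: {
        quantity: { eq: 0 },
        price: { gt: 100 }
      }
    }) {
    name
    price
  }
}
```
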
## Mutations

In GraphQL, mutations are the operation type you use when you need to modify data. Super Graph supports the `insert`, `update`, `upsert` and `delete` database operations. You can also do complex nested inserts and updates.

When using mutations the data must be passed as variables, since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments, and your variables are passed in as those arguments.

@@ -613,7 +750,7 @@ mutation {
}
```

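For instance, a basic insert could look like the sketch below, with the new row passed in as a `$data` variable (the variable shape here is an assumption, following the nested-insert examples later in this section):

```graphql
# variables (assumed): { "data": { "name": "Apple", "price": 1.25 } }
mutation {
  product(insert: $data) {
    id
    name
  }
}
```
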
#### Bulk insert

```json
{
@@ -659,7 +796,7 @@ mutation {
}
```

#### Bulk update

```json
{
@@ -702,7 +839,7 @@ mutation {
}
```

#### Bulk delete

```json
{
@@ -743,7 +880,7 @@ mutation {
}
```

#### Bulk upsert

```json
{
@@ -771,9 +908,244 @@ mutation {
}
```

Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example, you might need to create a product and assign it to a user, or create a user and their products at the same time. You just have to use simple JSON to define your mutation and Super Graph takes care of the rest.

### Nested Insert

Create a product item first and then assign it to a user

```json
{
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "connect": { "id": 5 }
    }
  }
}
```

```graphql
mutation {
  product(insert: $data) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}
```

Or the reverse, create the user first and then their product

```json
{
  "data": {
    "email": "thedude@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now"
    }
  }
}
```

```graphql
mutation {
  user(insert: $data) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}
```

### Nested Update

Update a product item first and then assign it to a user

```json
{
  "data": {
    "name": "Apple",
    "price": 1.25,
    "user": {
      "connect": { "id": 5 }
    }
  }
}
```

```graphql
mutation {
  product(update: $data, id: 5) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}
```

Or the reverse, update a user first and then their product

```json
{
  "data": {
    "email": "newemail@me.com",
    "full_name": "The Dude",
    "product": {
      "name": "Banana",
      "price": 1.25
    }
  }
}
```

```graphql
mutation {
  user(update: $data, id: 1) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}
```

### Pagination

This is a must-have feature of any API. When you want your users to go through a list page by page, or implement some fancy infinite scroll, you're going to need pagination. There are two ways to paginate in Super Graph.

#### Limit-Offset

This is simple enough but also inefficient when working with a large number of total items. Limit limits the number of items fetched and offset is the point you want to fetch from. The below query will fetch 10 results at a time starting with the 100th item. You will have to keep updating offset (110, 120, 130, etc) to walk through the results, so make offset a variable.

```graphql
query {
  products(limit: 10, offset: 100) {
    id
    slug
    name
  }
}
```

#### Cursor

This is a powerful and highly efficient way to paginate through a large number of results. In fact it does not matter how many total results there are; this will always be lightning fast. You can use a cursor to walk forward or backward through the results. If you plan to implement infinite scroll this is the option you should choose.

When going this route the results will contain a cursor value. This is an encrypted string that you don't have to worry about; just pass it back in on the next API call and you'll receive the next set of results. The cursor value is encrypted since its contents should only matter to Super Graph and not the client. Also, since the primary key is used for this feature, it's possible you might not want to leak its value to clients.

You will need to set this config value to ensure the encrypted cursor data is secure. If not set, a random value is used which will change with each deployment, breaking older cursor values that clients might be using, so it's best to set it.

```yaml
# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics
```

Paginating forward through your results

```json
{
  "variables": {
    "cursor": "MJoTLbQF4l0GuoDsYmCrpjPeaaIlNpfm4uFU4PQ="
  }
}
```

```graphql
query {
  products(first: 10, after: $cursor) {
    slug
    name
  }
}
```

Paginating backward through your results

```graphql
query {
  products(last: 10, before: $cursor) {
    slug
    name
  }
}
```

```json
"data": {
  "products": [
    {
      "slug": "eius-nulla-et-8",
      "name": "Pale Ale"
    },
    {
      "slug": "sapiente-ut-alias-12",
      "name": "Brown Ale"
    }
    ...
  ],
  "products_cursor": "dJwHassm5+d82rGydH2xQnwNxJ1dcj4/cxkh5Cer"
}
```

Nested tables can also have cursors. Requesting multiple cursors in a single request is supported, but when paginating using a cursor only one table is currently supported. To explain this better: you can only use a `before` or `after` argument with a cursor value to paginate a single table in a query.

```graphql
query {
  products(last: 10) {
    slug
    name
    customers(last: 5) {
      email
      full_name
    }
  }
}
```

Multiple order-by arguments are supported. Super Graph is smart enough to allow cursor-based pagination even when you also need a complex sort order, like below.

```graphql
query {
  products(
    last: 10
    before: $cursor
    order_by: [ price: desc, total_customers: asc ]) {
    slug
    name
  }
}
```

## Using Variables

Variables (`$product_id`) and their values (`"product_id": 5`) can be passed alongside the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The built-in web-ui also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner and better client side code.

```javascript
@@ -793,7 +1165,105 @@ fetch('http://localhost:8080/api/v1/graphql', {
.then(res => console.log(res.data));
```

## GraphQL with React

This is a quick and simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)

```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'

// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })

const App = () => {
  const [user, setUser] = useState(null)

  useEffect(() => {
    async function action() {
      // Use the GraphQL client to execute a graphQL query
      // The second argument to the client is for the variables you need to pass
      const result = await graph(`{ user { id first_name last_name picture_url } }`)()
      setUser(result)
    }
    action()
  }, []);

  return (
    <div className="App">
      <h1>{ JSON.stringify(user) }</h1>
    </div>
  );
}

export default App;
```

## Advanced Columns

The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a json column saves space and reduces complexity in your app. The only issue with these columns is that your SQL queries can get harder to write and maintain.

Super Graph steps in here to help you by supporting these columns right out of the box. It allows you to work with these columns just like you would with tables. Joining data against or modifying array columns using the `connect` or `disconnect` keywords in mutations is fully supported. Another very useful feature is the ability to treat `json` or `binary json (jsonb)` columns as separate tables, even using them in nested queries joining against related tables. To replicate these features on your own would take a lot of complex SQL. Using Super Graph means you don't have to deal with any of this; it just works.

### Array Columns

Configure a relationship between an array column `tag_ids`, which contains integer ids for tags, and the column `id` in the table `tags`.

```yaml
tables:
  - name: posts
    columns:
      - name: tag_ids
        related_to: tags.id
```

```graphql
query {
  posts {
    title
    tags {
      name
      image
    }
  }
}
```

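Since `connect` works with array columns too, a mutation could attach existing tags to a post by their ids. This is only a rough sketch under the `tag_ids` setup above; the variable shape is an assumption modeled on the nested-insert examples earlier, and the exact form accepted may differ:

```graphql
# variables (assumed shape):
# { "data": { "title": "Hello", "tags": { "connect": { "id": [2, 5] } } } }
mutation {
  post(insert: $data) {
    id
    title
  }
}
```
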
### JSON Column

Configure a JSON column called `tag_count` in the table `products` into a separate table. This JSON column contains a json array of objects, each with a tag id and a count of the number of times the tag was used. As a separate table you can nest it into your GraphQL query and treat it like a table, using any of the standard features like `order_by`, `limit`, `where` clauses, etc.

The configuration below tells Super Graph to create a synthetic table called `tag_count` using the column `tag_count` from the `products` table, and that this new table has two columns, `tag_id` and `count`, of the listed types and with the defined relationships.

```yaml
tables:
  - name: tag_count
    table: products
    columns:
      - name: tag_id
        type: bigint
        related_to: tags.id
      - name: count
        type: int
```

```graphql
query {
  products {
    name
    tag_counts {
      count
      tag {
        name
      }
    }
  }
}
```

## Full text search

Every app these days needs search. Often this means reaching for something heavy like Solr. While that will work, why add complexity to your infrastructure when Postgres has really great and fast full text search built-in? And since it's part of Postgres it's also available in Super Graph.

@@ -878,6 +1348,224 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```

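Once a search column is set up, a search query is just another argument on the table. A quick sketch (the `search` argument is the one mentioned under Sorting earlier; the result fields assume the demo `products` table):

```graphql
query {
  # full text search across the indexed columns of products
  products(search: "ale") {
    name
    description
  }
}
```
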
## API Security

One of the most common questions I get asked is: what happens if a user out on the internet sends queries that we don't want run? For example, how do we stop them from fetching all users or the emails of users? Our answer to this is that it is not an issue, as this cannot happen. Let me explain.

Super Graph runs in one of two modes, `development` or `production`. This is controlled via the config value `production: false`; when it's false it's running in development mode, and when true, production. In development mode all the **named** queries (including mutations) are saved to the allow list `./config/allow.list`. In production mode, when Super Graph starts, only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks).

Prepared statements are designed by databases to be fast and secure. They protect against all kinds of sql injection attacks, and since they are pre-processed and pre-planned they are much faster to run than raw sql queries. Also, there's no GraphQL to SQL compiling happening in production mode, which makes your queries lightning fast as they are sent directly to the database with almost no overhead.

In short, in production only queries listed in the allow list file `./config/allow.list` can be used; all other queries will be blocked.

::: tip How to think about the allow list?
The allow list file is essentially a list of all your exposed API calls and the data that passes within them. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked. It's a great way to build compliance tooling and ensure your user data is always safe.
:::

This is an example of a named query. `getUserWithProducts` is the name you've given to this query; it can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode.

```graphql
query getUserWithProducts {
  users {
    id
    name
    products {
      id
      name
      price
    }
  }
}
```

## Authentication

You can only have one type of auth enabled, either Rails or JWT.

### Ruby on Rails

Almost all Rails apps use Devise or Warden for authentication. Once the user is authenticated a session is created with the user's ID. The session can either be stored in the user's browser as a cookie, or in memcache or redis. If memcache or redis is used then a cookie is set in the user's browser with just the session id.

Super Graph can handle all these variations, including the old and new session formats. Just enable the right `auth` config based on how your rails app is configured.

#### Cookie session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Rails version this is used for reading the
    # various cookies formats.
    version: 5.2

    # Found in 'Rails.application.config.secret_key_base'
    secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
```

#### Memcache session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Memcache remote cookie store.
    url: memcache://127.0.0.1
```

#### Redis session store

```yaml
auth:
  type: rails
  cookie: _app_session

  rails:
    # Redis remote cookie store
    url: redis://127.0.0.1:6379
    password: ""
    max_idle: 80
    max_active: 12000
```

### JWT Tokens

```yaml
auth:
  type: jwt

  jwt:
    # the two providers are 'auth0' and 'none'
    provider: auth0
    secret: abc335bfcfdb04e50db5bb0a4d67ab9
    public_key_file: /secrets/public_key.pem
    public_key_type: ecdsa #rsa
```

For JWT tokens we currently support tokens from a provider like Auth0, or if you have a custom solution then we look for the `user_id` in the `subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token, `user_id` and `user_id_provider`, to use in your filters.

We can get the JWT token either from the `authorization` header, where we expect it to be a `bearer` token, or if `cookie` is specified then we look there.

For validation a `secret` or a public key (ecdsa or rsa) is required. When using public keys they have to be in a PEM format file.

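Once derived, `$user_id` behaves like any other variable and can be used directly in queries or filters. A sketch using the demo `products` table:

```graphql
query {
  # only return products belonging to the authenticated user
  products(where: { user_id: { eq: $user_id } }) {
    id
    name
  }
}
```
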
### HTTP Headers

```yaml
header:
  name: X-AppEngine-QueueName
  exists: true
  #value: default
```

Header auth is usually the best option to authenticate requests to the action endpoints. For example, you might want to use an action to refresh a materialized view every hour and only want a cron service like the Google AppEngine Cron service to make that request; in this case a config similar to the one above will do.

The `exists: true` parameter ensures that only the existence of the header is checked, not its value. The `value` parameter lets you confirm that the value matches the one assigned to the parameter. This helps in the case you are using a shared secret to protect the endpoint.

### Named Auth

```yaml
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header on all its requests
auths:
  - name: from_taskqueue
    type: header
    header:
      name: X-Appengine-Cron
      exists: true
```

In addition to the default auth configuration, you can create additional named auth configurations to be used with features like `actions`. For example, while your main GraphQL endpoint uses JWT for authentication, you may want to use a header value to ensure your actions can only be called by clients having access to a shared secret or security header.

## Actions

Actions are a very useful feature that is currently a work in progress. For now, the best use case for actions is to refresh database tables like materialized views, or to call a database procedure to refresh a cache table, etc. An action creates an http endpoint that anyone can call to have the SQL query executed. The below example will create an endpoint `/api/v1/actions/refresh_leaderboard_users`; any request sent to that endpoint will cause the SQL query to be executed. The `auth_name` points to a named auth that should be used to secure this endpoint. In future we have big plans to allow your own custom code to run using actions.

```yaml
actions:
  - name: refresh_leaderboard_users
    sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
    auth_name: from_taskqueue
```

#### Using CURL to test a query

```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
  -H 'content-type: application/json' \
  -H 'X-User-ID: 5' \
  --data-binary '{"query":"{ products { name price users { email }}}"}'
```

## Access Control

It's common for APIs to control what information they return or insert based on the role of the user. In Super Graph we have two primary roles, `user` and `anon`: the first for users where a `user_id` is available, the latter for users where it's not.

::: tip
An authenticated request is one where Super Graph can extract a `user_id` based on the configured authentication method (jwt, rails cookies, etc).
:::

The `user` role can be divided up into further roles based on attributes in the database. For example, when fetching a list of users, a normal user can only fetch their own entry while an admin can fetch all the users within a company and an admin user can fetch everyone. In some places this is called attribute based access control. So in a way we support both role based access control and attribute based access control.

Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.

### Configure RBAC

```yaml
roles_query: "SELECT * FROM users WHERE users.id = $user_id"

roles:
  - name: user
    tables:
      - name: users
        query:
          filters: ["{ id: { _eq: $user_id } }"]

        insert:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns: ["id", "name", "description" ]
          presets:
            - created_at: "now"

        update:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns:
            - id
            - name
          presets:
            - updated_at: "now"

        delete:
          block: true

  - name: admin
    match: users.id = 1
    tables:
      - name: users
        query:
          filters: []
```

This configuration is relatively simple to follow. The `roles_query` parameter is the query that must be run to help figure out a user's role. This query can be as complex as you like and include joins with other tables.

The individual roles are defined under the `roles` parameter, and this includes each table the role has a custom setting for. The role is dynamically matched using the `match` parameter; for example, in the above case `users.id = 1` means that when the `roles_query` is executed a user with the id `1` will be assigned the admin role, and those that don't match get the `user` role if authenticated successfully, or the `anon` role otherwise.

## Remote Joins

It often happens that after fetching some data from the DB we need to call another API to fetch some more data, with all of it combined into a single JSON response. For example, along with a list of users you need their last 5 payments from Stripe. This requires you to query your DB for the users and Stripe for the payments. Super Graph handles all this for you; additionally, only the fields you requested from the Stripe API are returned.

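A sketch of what such a query could look like, assuming the `payments` remote configured on the `customers` table in the sample configuration later in this doc (the payment field names here are assumptions; they depend entirely on what the remote API returns):

```graphql
query {
  customers {
    email
    # 'payments' is resolved by calling the configured remote API
    payments {
      amount
      created
    }
  }
}
```
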
@@ -955,114 +1643,69 @@ Even tracing data is available in the Super Graph web UI if tracing is enabled in


## Database Relationships

In most cases you don't need this configuration. Super Graph will discover and learn the relationship graph within your database automatically. It does this using the `Foreign Key` relationships that you have defined in your database schema.

The below configs are only needed in special cases, such as when you don't use foreign keys or when you want to create a relationship between two tables where a foreign key is not defined or cannot be defined.

For example, in the sample below a relationship is defined between the `tags` column on the `posts` table and the `slug` column on the `tags` table. This cannot be defined using foreign keys, since the `tags` column is of the array type `text[]` and Postgres for one does not allow foreign keys with array columns.

```yaml
tables:
  - name: posts
    columns:
      - name: tags
        related_to: tags.slug
```

## Configuration

Configuration files can either be in YAML or JSON; their names are derived from the `GO_ENV` variable. For example, `GO_ENV=prod` will cause the `prod.yaml` config file to be used, while `GO_ENV=dev` will use `dev.yaml`. A path to look for the config files in can be specified using the `-path <folder>` command line argument.

We've tried to ensure that the config file is self documenting and easy to work with.

```yaml
# Inherit config from this other config file
# so I only need to overwrite some values
inherits: base

app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
debug_level: 1

# debug, info, warn, error, fatal, panic
log_level: "debug"

# enable or disable http compression (uses gzip)
http_compress: true

# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# the allow list in ./config/allow.list
production: false

# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true

# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: true

# File that points to the database seeding script
# seed_file: seed.js

# Path pointing to where the migrations can be found
migrations_path: ./config/migrations

# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@@ -1085,8 +1728,9 @@ auth:
  cookie: _app_session

  # Comment this out if you want to disable setting
  # the user_id via a header for testing.
  # Disable in production
  creds_in_header: true

  rails:
    # Rails version this is used for reading the
@@ -1097,10 +1741,10 @@ auth:
    secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566

    # Remote cookie store. (memcache or redis)
    # url: redis://redis:6379
    # password: ""
    # max_idle: 80
    # max_active: 12000

    # In most cases you don't need these
    # salt: "encrypted cookie"
@@ -1113,71 +1757,141 @@ auth:
    # public_key_file: /secrets/public_key.pem
    # public_key_type: ecdsa #rsa

  # header:
  #   name: dnt
  #   exists: true
  #   value: localhost:8080

# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header on all its requests
auths:
  - name: from_taskqueue
    type: header
    header:
      name: X-Appengine-Cron
      exists: true

database:
  type: postgres
  host: db
  port: 5432
  dbname: app_development
  user: postgres
  password: postgres

  #schema: "public"
  #pool_size: 10
  #max_retries: 0
  #log_level: "debug"

  # Set session variable "user.id" to the user id
  # Enable this if you need the user id in triggers, etc
  set_user_id: false

  # Define additional variables here to be used with filters
  variables:
    account_id: "select account_id from users where id = $user_id"
    admin_account_id: "5"

  # Field and table names that you wish to block
  blocklist:
    - ar_internal_metadata
    - schema_migrations
    - secret
    - password
    - encrypted
    - token

# Create custom actions with their own api endpoints
# For example the below action will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this url will execute the configured SQL query
# which in this case refreshes a materialized view in the database.
# The auth_name is from one of the configured auths
actions:
  - name: refresh_leaderboard_users
    sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
    auth_name: from_taskqueue

tables:
  - name: customers
    remotes:
      - name: payments
        id: stripe_id
        url: http://rails_app:3000/stripe/$id
        path: data
        # debug: true
        pass_headers:
          - cookie
        set_headers:
          - name: Host
            value: 0.0.0.0
          # - name: Authorization
          #   value: Bearer <stripe_api_key>

  - # You can create new fields that have a
    # real db table backing them
    name: me
    table: users

roles_query: "SELECT * FROM users WHERE id = $user_id"

roles:
  - name: anon
    tables:
      - name: products
        limit: 10

        query:
          columns: ["id", "name", "description" ]
          aggregation: false

        insert:
          allow: false

        update:
          allow: false

        delete:
          allow: false

  - name: user
    tables:
      - name: users
        query:
          filters: ["{ id: { _eq: $user_id } }"]

      - name: products
        query:
          limit: 50
          filters: ["{ user_id: { eq: $user_id } }"]
          columns: ["id", "name", "description" ]
          disable_functions: false

        insert:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns: ["id", "name", "description" ]
          set:
            - created_at: "now"

        update:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns:
            - id
            - name
          set:
            - updated_at: "now"

        delete:
          block: true

  - name: admin
    match: id = 1000
    tables:
      - name: users
        filters: []

        # - name: posts
        #   filter: ["{ account_id: { _eq: $account_id } }"]
```

If deploying into environments like Kubernetes it's useful to be able to configure things like secrets and hosts through environment variables, therefore we expose the below environment variables. This is especially useful for secrets, since they are usually injected in via a secrets management framework, i.e. Kubernetes Secrets.

@@ -1200,9 +1914,74 @@ SG_AUTH_RAILS_REDIS_PASSWORD
SG_AUTH_JWT_PUBLIC_KEY_FILE
```

## YugabyteDB

Yugabyte is an open-source, geo-distributed, cloud-native relational DB that scales horizontally. Super Graph works with Yugabyte right out of the box. If you think your data needs will outgrow Postgres and you don't really want to deal with sharding, then Yugabyte is the way to go. Just point Super Graph to your Yugabyte DB and everything will just work, including running migrations, seeding, querying, mutations, etc.

To use Yugabyte in your local development flow, just uncomment the following lines in the `docker-compose.yml` file that is part of your Super Graph app. Also remember to comment out the original postgres `db` config.

```yaml
# Postgres DB
# db:
#   image: postgres:latest
#   ports:
#     - "5432:5432"

# Standard config to run a single node of Yugabyte
yb-master:
  image: yugabytedb/yugabyte:latest
  container_name: yb-master-n1
  command: [ "/home/yugabyte/bin/yb-master",
             "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
             "--master_addresses=yb-master-n1:7100",
             "--replication_factor=1",
             "--enable_ysql=true"]
  ports:
    - "7000:7000"
  environment:
    SERVICE_7000_NAME: yb-master

db:
  image: yugabytedb/yugabyte:latest
  container_name: yb-tserver-n1
  command: [ "/home/yugabyte/bin/yb-tserver",
             "--fs_data_dirs=/mnt/disk0,/mnt/disk1",
             "--start_pgsql_proxy",
             "--tserver_master_addrs=yb-master-n1:7100"]
  ports:
    - "9042:9042"
    - "6379:6379"
    - "5433:5433"
    - "9000:9000"
  environment:
    SERVICE_5433_NAME: ysql
    SERVICE_9042_NAME: ycql
    SERVICE_6379_NAME: yedis
    SERVICE_9000_NAME: yb-tserver
  depends_on:
    - yb-master

# Environment variables to point Super Graph to Yugabyte
# This is required since it uses a different user and port number
yourapp_api:
  image: dosco/super-graph:latest
  environment:
    GO_ENV: "development"
    # Uncomment below for Yugabyte DB
    SG_DATABASE_PORT: 5433
    SG_DATABASE_USER: yugabyte
    SG_DATABASE_PASSWORD: yugabyte
  volumes:
    - ./config:/config
  ports:
    - "8080:8080"
  depends_on:
    - db
```

## Developing Super Graph

If you want to build and run Super Graph from code, then the below commands will build the web ui and launch Super Graph in developer mode with a watcher to rebuild on code changes. The demo rails app is also launched to make it easier to test changes.

```bash
@@ -1212,9 +1991,6 @@ brew install yarn
# yarn install dependencies and build the web ui
(cd web && yarn install && yarn build)

# generate some stuff the go code needs
go generate ./...

# do this only the first time, to set up the database
docker-compose run rails_app rake db:create db:migrate db:seed

@@ -1223,6 +1999,10 @@ docker-compose up
```

## Learn how the code works

[Super Graph codebase explained](https://supergraph.dev/internals.html)

## Apache License 2.0

Apache Public License 2.0 | Copyright © 2018-present Vikram Rangnekar

docs/internals.md
@@ -0,0 +1,241 @@

---
sidebar: auto
---

# Super Graph Codebase Explained

Super Graph code is made up of a number of packages. We have done our best to keep each package small and focused. Let us begin by looking at some of these packages.

1. qcode - GraphQL lexer and parser.
2. psql - SQL generator
3. serv - HTTP Endpoint, Configs, CLI, etc
4. rails - Rails cookie and session store decoders

## QCODE

This package contains the core of the GraphQL compiler. It handles the lexing and parsing of the GraphQL query, transforming it into an internal representation called `QCode`.

This is the first step of the compiling process. The `func NewCompiler(c Config)` function creates a new instance of this compiler, which has its own config.

Keep in mind QCode has no knowledge of the database structure; it is designed to be a fast GraphQL parser. Care is taken to keep memory allocations to a minimum.

```go
const (
	opQuery
	opMutate
	...
)

type QCode struct {
	Type    QType
	Selects []Select
	...
}

type Select struct {
	ID         int32
	ParentID   int32
	Args       map[string]*Node
	Name       string
	FieldName  string
	Cols       []Column
	Where      *Exp
	OrderBy    []*OrderBy
	DistinctOn []string
	Paging     Paging
	Children   []int32
	Functions  bool
	Allowed    map[string]struct{}
	PresetMap  map[string]string
	PresetList []string
}
```

But before the incoming GraphQL query can be turned into QCode, it must first be tokenized by the lexer `lex.go`. As the tokenizer walks the bytes of the query, it generates `item` struct tokens which are then consumed by the next step, the parser `parse.go`.

```go
type item struct {
	typ itemType
	pos Pos
	end Pos
}
```

For example, a simple query like `query getUser { user { id } }` will be converted into several tokens like the ones below.

```go
item{itemQuery, 0, 4}     // query
item{itemName, 6, 12}     // getUser
item{itemObjOpen, 16, 20} // {
...
```

These tokens are then fed into the parser `parse.go`. The parser does the work of generating an abstract syntax tree (AST) from the tokens. This AST is an internal representation (data structure) and is not exposed outside the package. Since the AST is a tree, a stack `stack.go` is used to walk the tree and generate the QCode AST. The QCode data structure is also a tree (represented as an array). This is then returned to the caller of the compile function.

```go
type Operation struct {
	Type   parserType
	Name   string
	Args   []Arg
	Fields []Field
}

type Field struct {
	ID       int32
	ParentID int32
	Name     string
	Alias    string
	Args     []Arg
	Children []int32
}
```

## PSQL

This package is responsible for generating Postgres SQL from the QCode AST. There are various GraphQL query types (Query, Mutation, etc) and several more sub-types, like single-root or multi-root queries, and various types of mutations (insert, update, delete, bulk insert, etc). This package is designed to be able to generate SQL for all of those types.

In addition to QCode, variable data is also passed to the compile function within this package. Variables are decoded to derive what is being inserted and what kind of insert it is, single or bulk. This information is not available in the GraphQL query; it's passed in separately via variables. This package is able to put all this together and generate the right SQL code.

The entry point of this package is in `query.go`. The database schema must be passed in the config object when creating a new compiler instance `NewCompiler`. The functions to extract this schema from the database are also part of this package, in `tables.go`. The `GetTables` function fetches all the tables from the database and `GetColumns` fetches columns and relationship information.

```go
func NewCompiler(conf Config) *Compiler {
	return &Compiler{conf.Schema, conf.Vars}
}

func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
	switch qc.Type {
	case qcode.QTQuery:
		return co.compileQuery(qc, w)
	case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
		return co.compileMutation(qc, w, vars)
	}

	return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
}
```

GraphQL input is first converted to QCode.

```graphql
query {
  user {
    id
  }
  posts {
    title
  }
}
```

Then the SQL is generated. In reality the generated SQL is far more complex, since it has to be very efficient, leverage the power of Postgres, support RBAC (role based access control), and do all of this in a single SQL query.

```sql
SELECT users.id, posts.title FROM users, posts;
```

## SERV

The `serv` package contains most of the code that turns the above compiler into an HTTP service. It also includes authentication middleware, remote join resolvers, config parsing, and database migration and seeding commands.

Another big feature that this package handles is the `allow.list` management code: in production mode it parses the allow list file and registers prepared statements, and in development mode it adds GraphQL queries to this file.

Currently the following global variables are referenced across the package. In future I'd prefer to move these into a context struct and pass that around instead.

```go
var (
	logger   zerolog.Logger  // logger for everything but errors
	errlog   zerolog.Logger  // logger for errors includes line numbers
	conf     *config         // parsed config
	confPath string          // path to the config file
	db       *pgxpool.Pool   // database connection pool
	schema   *psql.DBSchema  // database tables, columns and relationships
	qcompile *qcode.Compiler // qcode compiler
	pcompile *psql.Compiler  // postgres sql compiler
)
```

## Testing

There are several unit tests and benchmark tests (`parse_test.go`) included. There are also scripts included for memory (`pprof_mem.sh`) and cpu (`pprof_cpu.sh`) profiling.

```go
// Test to ensure synthetic tables generate the correct SQL
func syntheticTables(t *testing.T) {
	gql := `query {
		me {
			email
		}
	}`

	sql := `SELECT json_object_agg('me', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT ) AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`

	resSQL, err := compileGQLToPSQL(gql, nil, "user")
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}
```

You can run tests within each package or across the entire app. It is usually fastest to first write a test and then build the feature to satisfy it.

```
go test -v ./...
```

Memory profiling can help find where allocations are happening within the package code.

```bash
$ cd ./psql
$ ./pprof_mem.sh
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8            52567   19401 ns/op   3918 B/op   61 allocs/op
BenchmarkCompileParallel-8   219548    5684 ns/op   3938 B/op   61 allocs/op
PASS
ok      github.com/dosco/super-graph/psql 2.582s
Type: alloc_space
Time: Nov 29, 2019 at 11:59pm (EST)
Entering interactive mode (type "help" for commands, "o" for options)
(pprof) top
Showing nodes accounting for 880.59MB, 80.63% of 1092.14MB total
Dropped 33 nodes (cum <= 5.46MB)
Showing top 10 nodes out of 35
      flat  flat%   sum%        cum   cum%
      22MB  2.01%  2.01%   903.57MB 82.73%  github.com/dosco/super-graph/qcode.(*Compiler).Compile
         0     0%  2.01%   862.98MB 79.02%  github.com/dosco/super-graph/psql.BenchmarkCompileParallel.func1
         0     0%  2.01%   862.98MB 79.02%  testing.(*B).RunParallel.func1
  461.95MB 42.30% 44.31%   760.53MB 69.64%  github.com/dosco/super-graph/qcode.(*Compiler).compileQuery
  396.63MB 36.32% 80.63%   396.63MB 36.32%  github.com/dosco/super-graph/util.NewStack
         0     0% 80.63%   252.07MB 23.08%  github.com/dosco/super-graph/qcode.(*Compiler).compileArgs
         0     0% 80.63%   228.15MB 20.89%  testing.(*B).runN
         0     0% 80.63%   227.63MB 20.84%  github.com/dosco/super-graph/psql.BenchmarkCompile
         0     0% 80.63%   227.63MB 20.84%  testing.(*B).launch
         0     0% 80.63%   187.04MB 17.13%  github.com/dosco/super-graph/psql.(*Compiler).Compile
```

## Benchmarking

Most packages contain benchmark tests to ensure new features don't introduce a significant regression to performance.

```bash
$ cd ./psql
$ go test -v -run=xx -bench=.
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8            60775   19076 ns/op   3919 B/op   61 allocs/op
BenchmarkCompileParallel-8   207847    5172 ns/op   3937 B/op   61 allocs/op
PASS
ok      github.com/dosco/super-graph/psql 2.530s
```

## Reach out

If you'd like me to explain other parts of the code please reach out over Twitter or Discord. I'll keep adding to this doc as I get time.

@@ -10,5 +10,10 @@
    "tailwindcss": "^1.0.6",
    "vuepress": "^1.0.0",
    "webpack-dev-middleware": "3.6.0"
  },
  "dependencies": {
    "@fortawesome/fontawesome-svg-core": "^1.2.25",
    "@fortawesome/free-solid-svg-icons": "^5.11.2",
    "@fortawesome/vue-fontawesome": "^0.1.7"
  }
}

docs/yarn.lock
@@ -10,17 +10,17 @@
|
||||
"@babel/highlight" "^7.0.0"
|
||||
|
||||
"@babel/core@^7.0.0":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.6.2.tgz#069a776e8d5e9eefff76236bc8845566bd31dd91"
|
||||
integrity sha512-l8zto/fuoZIbncm+01p8zPSDZu/VuuJhAfA7d/AbzM09WR7iVhavvfNDYCNpo1VvLk6E6xgAoP9P+/EMJHuRkQ==
|
||||
version "7.6.4"
|
||||
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.6.4.tgz#6ebd9fe00925f6c3e177bb726a188b5f578088ff"
|
||||
integrity sha512-Rm0HGw101GY8FTzpWSyRbki/jzq+/PkNQJ+nSulrdY6gFGOsNseCqD6KHRYe2E+EdzuBdr2pxCp6s4Uk6eJ+XQ==
|
||||
dependencies:
|
||||
"@babel/code-frame" "^7.5.5"
|
||||
"@babel/generator" "^7.6.2"
|
||||
"@babel/generator" "^7.6.4"
|
||||
"@babel/helpers" "^7.6.2"
|
||||
"@babel/parser" "^7.6.2"
|
||||
"@babel/parser" "^7.6.4"
|
||||
"@babel/template" "^7.6.0"
|
||||
"@babel/traverse" "^7.6.2"
|
||||
"@babel/types" "^7.6.0"
|
||||
"@babel/traverse" "^7.6.3"
|
||||
"@babel/types" "^7.6.3"
|
||||
convert-source-map "^1.1.0"
|
||||
debug "^4.1.0"
|
||||
json5 "^2.1.0"
|
||||
@ -29,12 +29,12 @@
|
||||
semver "^5.4.1"
|
||||
source-map "^0.5.0"
|
||||
|
||||
"@babel/generator@^7.6.2":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.6.2.tgz#dac8a3c2df118334c2a29ff3446da1636a8f8c03"
|
||||
integrity sha512-j8iHaIW4gGPnViaIHI7e9t/Hl8qLjERI6DcV9kEpAIDJsAOrcnXqRS7t+QbhL76pwbtqP+QCQLL0z1CyVmtjjQ==
|
||||
"@babel/generator@^7.6.3", "@babel/generator@^7.6.4":
|
||||
version "7.6.4"
|
||||
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.6.4.tgz#a4f8437287bf9671b07f483b76e3bb731bc97671"
|
||||
integrity sha512-jsBuXkFoZxk0yWLyGI9llT9oiQ2FeTASmRFE32U+aaDTfoE92t78eroO7PTpU/OrYq38hlcDM6vbfLDaOLy+7w==
|
||||
dependencies:
|
||||
"@babel/types" "^7.6.0"
|
||||
"@babel/types" "^7.6.3"
|
||||
jsesc "^2.5.1"
|
||||
lodash "^4.17.13"
|
||||
source-map "^0.5.0"
|
||||
@ -224,10 +224,10 @@
|
||||
esutils "^2.0.2"
|
||||
js-tokens "^4.0.0"
|
||||
|
||||
"@babel/parser@^7.6.0", "@babel/parser@^7.6.2":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.6.2.tgz#205e9c95e16ba3b8b96090677a67c9d6075b70a1"
|
||||
integrity sha512-mdFqWrSPCmikBoaBYMuBulzTIKuXVPtEISFbRRVNwMWpCms/hmE2kRq0bblUHaNRKrjRlmVbx1sDHmjmRgD2Xg==
|
||||
"@babel/parser@^7.6.0", "@babel/parser@^7.6.3", "@babel/parser@^7.6.4":
|
||||
version "7.6.4"
|
||||
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.6.4.tgz#cb9b36a7482110282d5cb6dd424ec9262b473d81"
|
||||
integrity sha512-D8RHPW5qd0Vbyo3qb+YjO5nvUVRTXFLQ/FsDxJU2Nqz4uB5EnUN0ZQSEYpvTIbRuttig1XbHWU5oMeQwQSAA+A==
|
||||
|
||||
"@babel/plugin-proposal-async-generator-functions@^7.2.0":
|
||||
version "7.2.0"
|
||||
@ -361,9 +361,9 @@
|
||||
"@babel/helper-plugin-utils" "^7.0.0"
|
||||
|
||||
"@babel/plugin-transform-block-scoping@^7.3.4":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.6.2.tgz#96c33ab97a9ae500cc6f5b19e04a7e6553360a79"
|
||||
integrity sha512-zZT8ivau9LOQQaOGC7bQLQOT4XPkPXgN2ERfUgk1X8ql+mVkLc4E8eKk+FO3o0154kxzqenWCorfmEXpEZcrSQ==
|
||||
version "7.6.3"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.6.3.tgz#6e854e51fbbaa84351b15d4ddafe342f3a5d542a"
|
||||
integrity sha512-7hvrg75dubcO3ZI2rjYTzUrEuh1E9IyDEhhB6qfcooxhDA33xx2MasuLVgdxzcP6R/lipAC6n9ub9maNW6RKdw==
|
||||
dependencies:
|
||||
"@babel/helper-plugin-utils" "^7.0.0"
|
||||
lodash "^4.17.13"
|
||||
@ -479,9 +479,9 @@
|
||||
"@babel/helper-plugin-utils" "^7.0.0"
|
||||
|
||||
"@babel/plugin-transform-named-capturing-groups-regex@^7.3.0":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.6.2.tgz#c1ca0bb84b94f385ca302c3932e870b0fb0e522b"
|
||||
integrity sha512-xBdB+XOs+lgbZc2/4F5BVDVcDNS4tcSKQc96KmlqLEAwz6tpYPEvPdmDfvVG0Ssn8lAhronaRs6Z6KSexIpK5g==
|
||||
version "7.6.3"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.6.3.tgz#aaa6e409dd4fb2e50b6e2a91f7e3a3149dbce0cf"
|
||||
integrity sha512-jTkk7/uE6H2s5w6VlMHeWuH+Pcy2lmdwFoeWCVnvIrDUnB5gQqTVI8WfmEAhF2CDEarGrknZcmSFg1+bkfCoSw==
|
||||
dependencies:
|
||||
regexpu-core "^4.6.0"
|
||||
|
||||
@ -622,17 +622,17 @@
|
||||
semver "^5.3.0"
|
||||
|
||||
"@babel/runtime-corejs2@^7.2.0":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime-corejs2/-/runtime-corejs2-7.6.2.tgz#062f8e31f3df30fc1a3dea68aa1bd854e06e9ba6"
|
||||
integrity sha512-wdyVKnTv9Be4YlwF/7pByYNfcl23qC21aAQ0aIaZOo2ZOvhFEyJdBLJClYZ9i+Pmrz7sUQgg/MwbJa2RZTkygg==
|
||||
version "7.6.3"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime-corejs2/-/runtime-corejs2-7.6.3.tgz#de3f446b3fb688b98cbd220474d1a7cad909bcb8"
|
||||
integrity sha512-nuA2o+rgX2+PrNTZ063ehncVcg7sn+tU71BB81SaWRVUbGwCOlb0+yQA1e0QqmzOfRSYOxfvf8cosYqFbJEiwQ==
|
||||
dependencies:
|
||||
core-js "^2.6.5"
|
||||
regenerator-runtime "^0.13.2"
|
||||
|
||||
"@babel/runtime@^7.0.0":
|
||||
version "7.6.2"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.6.2.tgz#c3d6e41b304ef10dcf13777a33e7694ec4a9a6dd"
|
||||
integrity sha512-EXxN64agfUqqIGeEjI5dL5z0Sw0ZwWo1mLTi4mQowCZ42O59b7DRpZAnTC6OqdF28wMBMFKNb/4uFGrVaigSpg==
|
||||
version "7.6.3"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.6.3.tgz#935122c74c73d2240cafd32ddb5fc2a6cd35cf1f"
|
||||
integrity sha512-kq6anf9JGjW8Nt5rYfEuGRaEAaH1mkv3Bbu6rYvLOpPh/RusSJXuKPEAoZ7L7gybZkchE8+NV5g9vKF4AGAtsA==
|
||||
dependencies:
|
||||
regenerator-runtime "^0.13.2"
|
||||
|
||||
@@ -645,31 +645,55 @@
    "@babel/parser" "^7.6.0"
    "@babel/types" "^7.6.0"

"@babel/traverse@^7.1.0", "@babel/traverse@^7.4.4", "@babel/traverse@^7.5.5", "@babel/traverse@^7.6.2":
  version "7.6.2"
  resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.6.2.tgz#b0e2bfd401d339ce0e6c05690206d1e11502ce2c"
  integrity sha512-8fRE76xNwNttVEF2TwxJDGBLWthUkHWSldmfuBzVRmEDWOtu4XdINTgN7TDWzuLg4bbeIMLvfMFD9we5YcWkRQ==
"@babel/traverse@^7.1.0", "@babel/traverse@^7.4.4", "@babel/traverse@^7.5.5", "@babel/traverse@^7.6.2", "@babel/traverse@^7.6.3":
  version "7.6.3"
  resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.6.3.tgz#66d7dba146b086703c0fb10dd588b7364cec47f9"
  integrity sha512-unn7P4LGsijIxaAJo/wpoU11zN+2IaClkQAxcJWBNCMS6cmVh802IyLHNkAjQ0iYnRS3nnxk5O3fuXW28IMxTw==
  dependencies:
    "@babel/code-frame" "^7.5.5"
    "@babel/generator" "^7.6.2"
    "@babel/generator" "^7.6.3"
    "@babel/helper-function-name" "^7.1.0"
    "@babel/helper-split-export-declaration" "^7.4.4"
    "@babel/parser" "^7.6.2"
    "@babel/types" "^7.6.0"
    "@babel/parser" "^7.6.3"
    "@babel/types" "^7.6.3"
    debug "^4.1.0"
    globals "^11.1.0"
    lodash "^4.17.13"

"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.4.4", "@babel/types@^7.5.5", "@babel/types@^7.6.0":
  version "7.6.1"
  resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.6.1.tgz#53abf3308add3ac2a2884d539151c57c4b3ac648"
  integrity sha512-X7gdiuaCmA0uRjCmRtYJNAVCc/q+5xSgsfKJHqMN4iNLILX39677fJE1O40arPMh0TTtS9ItH67yre6c7k6t0g==
"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.4.4", "@babel/types@^7.5.5", "@babel/types@^7.6.0", "@babel/types@^7.6.3":
  version "7.6.3"
  resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.6.3.tgz#3f07d96f854f98e2fbd45c64b0cb942d11e8ba09"
  integrity sha512-CqbcpTxMcpuQTMhjI37ZHVgjBkysg5icREQIEZ0eG1yCNwg3oy+5AaLiOKmjsCj6nqOsa6Hf0ObjRVwokb7srA==
  dependencies:
    esutils "^2.0.2"
    lodash "^4.17.13"
    to-fast-properties "^2.0.0"

"@fullhuman/postcss-purgecss@^1.1.0":
"@fortawesome/fontawesome-common-types@^0.2.25":
  version "0.2.25"
  resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-0.2.25.tgz#6df015905081f2762e5cfddeb7a20d2e9b16c786"
  integrity sha512-3RuZPDuuPELd7RXtUqTCfed14fcny9UiPOkdr2i+cYxBoTOfQgxcDoq77fHiiHcgWuo1LoBUpvGxFF1H/y7s3Q==

"@fortawesome/fontawesome-svg-core@^1.2.25":
  version "1.2.25"
  resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-1.2.25.tgz#24b03391d14f0c6171e8cad7057c687b74049790"
  integrity sha512-MotKnn53JKqbkLQiwcZSBJVYtTgIKFbh7B8+kd05TSnfKYPFmjKKI59o2fpz5t0Hzl35vVGU6+N4twoOpZUrqA==
  dependencies:
    "@fortawesome/fontawesome-common-types" "^0.2.25"

"@fortawesome/free-solid-svg-icons@^5.11.2":
  version "5.11.2"
  resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-5.11.2.tgz#2f2f1459743a27902b76655a0d0bc5ec4d945631"
  integrity sha512-zBue4i0PAZJUXOmLBBvM7L0O7wmsDC8dFv9IhpW5QL4kT9xhhVUsYg/LX1+5KaukWq4/cbDcKT+RT1aRe543sg==
  dependencies:
    "@fortawesome/fontawesome-common-types" "^0.2.25"

"@fortawesome/vue-fontawesome@^0.1.7":
  version "0.1.7"
  resolved "https://registry.yarnpkg.com/@fortawesome/vue-fontawesome/-/vue-fontawesome-0.1.7.tgz#121867297cafd141af78c67d92ab9f1ad4b7328b"
  integrity sha512-YCw2Q2m4fxzyFsPOH3uDYMoJztTD+pT+AAyse4LFpbdrBg+r8ueaVT8BFnXEjrGwMDJJeXrwJ5AOC6q/JWBI4w==

"@fullhuman/postcss-purgecss@^1.3.0":
  version "1.3.0"
  resolved "https://registry.yarnpkg.com/@fullhuman/postcss-purgecss/-/postcss-purgecss-1.3.0.tgz#d632900d818f4fcf4678e7326923fb838c3e03a7"
  integrity sha512-zvfS3dPKD2FAtMcXapMJXGbDgEp9E++mLR6lTgSruv6y37uvV5xJ1crVktuC1gvnmMwsa7Zh1m05FeEiz4VnIQ==
@@ -691,13 +715,13 @@
  integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==

"@silvanite/vuepress-plugin-tailwind@^1.1.0":
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/@silvanite/vuepress-plugin-tailwind/-/vuepress-plugin-tailwind-1.1.0.tgz#e5d0c8b0b1127201509196eca359ac9070517202"
  integrity sha512-pQVMz0knDMMfIuXhEwYSRK2gPW9ds+C9YEX8IF0sdCqiPRlJSNG5oUTIAoKoe2JYHlr7zdQZX8wBQN7FQEVO4Q==
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/@silvanite/vuepress-plugin-tailwind/-/vuepress-plugin-tailwind-1.2.0.tgz#2e1e6d10e441b49c7446bbb99e7fe8ec80aee273"
  integrity sha512-1LWlIa+g1vV6HkwwEmADAhnuZ33oAGidnbrCs7qnK53ApbtWa9K/+0cHCLhDeQ3zqoyotqK7YQR2sm5nCOTYqA==
  dependencies:
    "@fullhuman/postcss-purgecss" "^1.1.0"
    lodash "^4.17.11"
    tailwindcss "^0.7.4"
    "@fullhuman/postcss-purgecss" "^1.3.0"
    lodash "^4.17.15"
    tailwindcss "^1.1.2"

"@types/events@*":
  version "3.0.0"
@@ -719,9 +743,9 @@
  integrity sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==

"@types/node@*":
  version "12.7.8"
  resolved "https://registry.yarnpkg.com/@types/node/-/node-12.7.8.tgz#cb1bf6800238898bc2ff6ffa5702c3cadd350708"
  integrity sha512-FMdVn84tJJdV+xe+53sYiZS4R5yn1mAIxfj+DVoNiQjTYz1+OYmjwEZr1ev9nU0axXwda0QDbYl06QHanRVH3A==
  version "12.12.3"
  resolved "https://registry.yarnpkg.com/@types/node/-/node-12.12.3.tgz#ebfe83507ac506bc3486314a8aa395be66af8d23"
  integrity sha512-opgSsy+cEF9N8MgaVPnWVtdJ3o4mV2aMHvDq7thkQUFt0EuOHJon4rQpJfhjmNHB+ikl0Cd6WhWIErOyQ+f7tw==

"@types/q@^1.5.1":
  version "1.5.2"
@@ -746,9 +770,9 @@
    svg-tags "^1.0.0"

"@vue/babel-preset-app@^3.1.1":
  version "3.11.0"
  resolved "https://registry.yarnpkg.com/@vue/babel-preset-app/-/babel-preset-app-3.11.0.tgz#52bf79c15560a304a13f4770e3e5530e01dd6173"
  integrity sha512-fcCq9nuGGx1WGnyaKHvIC8RnWjISXGf1rJH4mN9+bymDfosgDbwnfV4TYvTZlyK1/aTHEEpIoO3XimTXBo7QBw==
  version "3.12.1"
  resolved "https://registry.yarnpkg.com/@vue/babel-preset-app/-/babel-preset-app-3.12.1.tgz#24c477052f078f30fdb7735103b14dd1fa2cbfe1"
  integrity sha512-Zjy5jQaikV1Pz+ri0YgXFS7q4/5wCxB5tRkDOEIt5+4105u0Feb/pvH20nVL6nx9GyXrECFfcm7Yxr/z++OaPQ==
  dependencies:
    "@babel/helper-module-imports" "^7.0.0"
    "@babel/plugin-proposal-class-properties" "^7.0.0"
@@ -765,15 +789,15 @@
    core-js "^2.6.5"

"@vue/babel-preset-jsx@^1.0.0":
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/@vue/babel-preset-jsx/-/babel-preset-jsx-1.1.0.tgz#c8001329f5b372297a3111a251eb4f9e956c1266"
  integrity sha512-EeZ9gwEmu79B4A6LMLAw5cPCVYIcbKWgJgJafWtLzh1S+SgERUmTkVQ9Vx4k8zYBiCuxHK3XziZ3VJIMau7THA==
  version "1.1.1"
  resolved "https://registry.yarnpkg.com/@vue/babel-preset-jsx/-/babel-preset-jsx-1.1.1.tgz#3a74642ca0ecea10aae13649df5ff70f9d24a6f5"
  integrity sha512-SeyndwQZc8MAOkhbJaC34ocTwcKekKkwrwnTMC3YF8VmGp5IQWW5gPIU66bqO9WFBXFA3J3ANsUbP2pj8q8KdQ==
  dependencies:
    "@vue/babel-helper-vue-jsx-merge-props" "^1.0.0"
    "@vue/babel-plugin-transform-vue-jsx" "^1.0.0"
    "@vue/babel-sugar-functional-vue" "^1.0.0"
    "@vue/babel-sugar-inject-h" "^1.0.0"
    "@vue/babel-sugar-v-model" "^1.0.0"
    "@vue/babel-sugar-v-model" "^1.1.1"
    "@vue/babel-sugar-v-on" "^1.1.0"

"@vue/babel-sugar-functional-vue@^1.0.0":
@@ -790,10 +814,10 @@
  dependencies:
    "@babel/plugin-syntax-jsx" "^7.2.0"

"@vue/babel-sugar-v-model@^1.0.0":
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.0.0.tgz#f4da56aa67f65a349bd2c269a95e72e601af4613"
  integrity sha512-Pfg2Al0io66P1eO6zUbRIgpyKCU2qTnumiE0lao/wA/uNdb7Dx5Tfd1W6tO5SsByETPnEs8i8+gawRIXX40rFw==
"@vue/babel-sugar-v-model@^1.1.1":
  version "1.1.1"
  resolved "https://registry.yarnpkg.com/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.1.1.tgz#a0f0750fcee20769805a20178299eebd4babf25a"
  integrity sha512-qiPbdUTiqNQdhXzvWQMVfrYGHCiMmscY7j/cudLxdxWZ8AFhgPRVlniVgaWIT7A1iOjs92e8U6qVyqkf0d4ZrA==
  dependencies:
    "@babel/plugin-syntax-jsx" "^7.2.0"
    "@vue/babel-helper-vue-jsx-merge-props" "^1.0.0"
@@ -826,18 +850,18 @@
    source-map "~0.6.1"
    vue-template-es2015-compiler "^1.9.0"

"@vuepress/core@^1.1.0":
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/@vuepress/core/-/core-1.1.0.tgz#32fd2b65a4613085cbd2b812bf67afe3a037dc65"
  integrity sha512-qC+R9kdTpui9QjQGUXUsmfAbToWOnoYjP2AJqMT/RsKUhQsXAIMe2Z0L/Vw2Z3bmlTUq26v+B1zlFgYzGuyIEQ==
"@vuepress/core@^1.2.0":
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/@vuepress/core/-/core-1.2.0.tgz#8e0c636b7f8676202fdd1ecfbe31bfe245dab2a8"
  integrity sha512-ZIsUkQIF+h4Yk6q4okoRnRwRhcYePu/kNiL0WWPDGycjai8cFqFjLDP/tJjfTKXmn9A62j2ETjSwaiMxCtDkyw==
  dependencies:
    "@babel/core" "^7.0.0"
    "@vue/babel-preset-app" "^3.1.1"
    "@vuepress/markdown" "^1.1.0"
    "@vuepress/markdown-loader" "^1.1.0"
    "@vuepress/plugin-last-updated" "^1.1.0"
    "@vuepress/plugin-register-components" "^1.1.0"
    "@vuepress/shared-utils" "^1.1.0"
    "@vuepress/markdown" "^1.2.0"
    "@vuepress/markdown-loader" "^1.2.0"
    "@vuepress/plugin-last-updated" "^1.2.0"
    "@vuepress/plugin-register-components" "^1.2.0"
    "@vuepress/shared-utils" "^1.2.0"
    autoprefixer "^9.5.1"
    babel-loader "^8.0.4"
    cache-loader "^3.0.0"
@@ -856,34 +880,34 @@
    postcss-safe-parser "^4.0.1"
    toml "^3.0.0"
    url-loader "^1.0.1"
    vue "^2.5.16"
    vue-loader "^15.2.4"
    vue-router "^3.0.2"
    vue-server-renderer "^2.5.16"
    vue-template-compiler "^2.5.16"
    vue "^2.6.10"
    vue-loader "^15.7.1"
    vue-router "^3.1.3"
    vue-server-renderer "^2.6.10"
    vue-template-compiler "^2.6.10"
    vuepress-html-webpack-plugin "^3.2.0"
    vuepress-plugin-container "^2.0.0"
    vuepress-plugin-container "^2.0.2"
    webpack "^4.8.1"
    webpack-chain "^4.6.0"
    webpack-dev-server "^3.5.1"
    webpack-merge "^4.1.2"
    webpackbar "3.2.0"

"@vuepress/markdown-loader@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/markdown-loader/-/markdown-loader-1.1.0.tgz#ab8ac2d286c255f9fa39ecb2f4542053314825ac"
|
||||
integrity sha512-X4+E9kbFt3OSXKxtQbNxeuzxbXdSMhXz8tliUW+/+1zx7RGn1ApcR0x7Y6/irESUgZ+GxOT3jyiCDZA4usHhLA==
|
||||
"@vuepress/markdown-loader@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/markdown-loader/-/markdown-loader-1.2.0.tgz#f8972014616b4ab46a99c9aaac2dd414d437411c"
|
||||
integrity sha512-gOZzoHjfp/W6t+qKBRdbHS/9TwRnNuhY7V+yFzxofNONFHQULofIN/arG+ptYc2SuqJ541jqudNQW+ldHNMC2w==
|
||||
dependencies:
|
||||
"@vuepress/markdown" "^1.1.0"
|
||||
"@vuepress/markdown" "^1.2.0"
|
||||
loader-utils "^1.1.0"
|
||||
lru-cache "^5.1.1"
|
||||
|
||||
"@vuepress/markdown@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/markdown/-/markdown-1.1.0.tgz#f9095c91019d21dbc3daedfd3773c6d5c29117ec"
|
||||
integrity sha512-O2ivsIkUrSUPDx+9N43XKSOGtprV4G1k6/4o3wZjjCn6GXYRsRE906cFDlbryHxQ49Z7Yfz3gyZIGMnThxLo/w==
|
||||
"@vuepress/markdown@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/markdown/-/markdown-1.2.0.tgz#7c457e0fab52ef8ac4dd1898ae450bc3aec30746"
|
||||
integrity sha512-RLRQmTu5wJbCO4Qv+J0K53o5Ew7nAGItLwWyzCbIUB6pRsya3kqSCViWQVlKlS53zFTmRHuAC9tJMRdzly3mCA==
|
||||
dependencies:
|
||||
"@vuepress/shared-utils" "^1.1.0"
|
||||
"@vuepress/shared-utils" "^1.2.0"
|
||||
markdown-it "^8.4.1"
|
||||
markdown-it-anchor "^5.0.2"
|
||||
markdown-it-chain "^1.3.0"
|
||||
@ -891,43 +915,43 @@
|
||||
markdown-it-table-of-contents "^0.4.0"
|
||||
prismjs "^1.13.0"
|
||||
|
||||
"@vuepress/plugin-active-header-links@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.1.0.tgz#cd62c1712040676035f34fed16a088e1c08811d8"
|
||||
integrity sha512-sa5ySYl/kTyr1AMakeW375wWs1aQ6psiJiSFclxkGvxcuGZ89F27ELvd43DKaETAlH90LcoE/j7TXMA895qXmw==
|
||||
"@vuepress/plugin-active-header-links@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.2.0.tgz#46495c89e51a95e57139be007dffbcae4b229260"
|
||||
integrity sha512-vdi7l96pElJvEmcx6t9DWJNH25TIurS8acjN3+b7o4NzdaszFn5j6klN6WtI4Z+5BVDrxHP5W1F3Ebw8SZyupA==
|
||||
dependencies:
|
||||
lodash.throttle "^4.1.1"
|
||||
lodash.debounce "^4.0.8"
|
||||
|
||||
"@vuepress/plugin-last-updated@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-last-updated/-/plugin-last-updated-1.1.0.tgz#65f2de734f3744026297b4667f3b5276ef99fd06"
|
||||
integrity sha512-x2SaAKWk26RK9O0slnZ55eSlBFYdYjFgqkRIfaOf4f2biWqTa9nzaIbvjzvcx3AZKlOWMl81KRwybhDL8E9OsA==
|
||||
"@vuepress/plugin-last-updated@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-last-updated/-/plugin-last-updated-1.2.0.tgz#7b34065b793848b0482a222b7a6f1b7df3668cdc"
|
||||
integrity sha512-j4uZb/MXDyG+v9QCG3T/rkiaOhC/ib7NKCt1cjn3GOwvWTDmB5UZm9EBhUpbDNrBgxW+SaHOe3kMVNO8bGOTGw==
|
||||
dependencies:
|
||||
cross-spawn "^6.0.5"
|
||||
|
||||
"@vuepress/plugin-nprogress@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-nprogress/-/plugin-nprogress-1.1.0.tgz#ca7106adc7016ed0d90a22555066c11da597ef59"
|
||||
integrity sha512-XhUyAO+mzYFOFupX/pNlPbv0bT596Lk000Q2PhWfRliwUzpUd0/u5Z6B6fasIVj01Yqih/gAGOZpr2ZwSCNJYw==
|
||||
"@vuepress/plugin-nprogress@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-nprogress/-/plugin-nprogress-1.2.0.tgz#ff6166946a0b118a39a562acb57983529afce4d2"
|
||||
integrity sha512-0apt3Dp6XVCOkLViX6seKSEJgARihi+pX3/r8j8ndFp9Y+vmgLFZnQnGE5iKNi1ty+A6PZOK0RQcBjwTAU4pAw==
|
||||
dependencies:
|
||||
nprogress "^0.2.0"
|
||||
|
||||
"@vuepress/plugin-register-components@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-register-components/-/plugin-register-components-1.1.0.tgz#42ea75bcad3fb562fbb86c424136f86e13641162"
|
||||
integrity sha512-HXGdcmBdGHLhI8KHr09GnnZEzgCuaIQx1WBqDNfbigSVKEx910L56ej+Whl6VFd7D0uOLUlW4kb9ELM0sjJpKg==
|
||||
"@vuepress/plugin-register-components@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-register-components/-/plugin-register-components-1.2.0.tgz#95aa0e0af94b2758b26ab98814c43b0f7bcd502b"
|
||||
integrity sha512-C32b8sbGtDEX8I3SUUKS/w2rThiRFiKxmzNcJD996me7VY/4rgmZ8CxGtb6G9wByVoK0UdG1SOkrgOPdSCm80A==
|
||||
dependencies:
|
||||
"@vuepress/shared-utils" "^1.1.0"
|
||||
"@vuepress/shared-utils" "^1.2.0"
|
||||
|
||||
"@vuepress/plugin-search@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-search/-/plugin-search-1.1.0.tgz#3b7a344a7df1bab27f10a46e6b57680c8f5d4c7e"
|
||||
integrity sha512-GoxvcM65ZAZycnsoZJ/wx9F3hXKzzJQdS7lNnAuHrvCheT5tVO1wwMumVP/unZU/59zCQ1PiyReYntLSp5bXVg==
|
||||
"@vuepress/plugin-search@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/plugin-search/-/plugin-search-1.2.0.tgz#0b27c467b7fd42bd4d9e32de0fe2fb81a24bd311"
|
||||
integrity sha512-QU3JfnMfImDAArbJOVH1q1iCDE5QrT99GLpNGo6KQYZWqY1TWAbgyf8C2hQdaI03co1lkU2Wn/iqzHJ5WHlueg==
|
||||
|
||||
"@vuepress/shared-utils@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/shared-utils/-/shared-utils-1.1.0.tgz#9d220ffe54f2d698c56ca5348ba2cb9dd72800da"
|
||||
integrity sha512-zvYfejRRl7y3oavLvAe7dHfCu4XewKnhsyUQ7to6tfxVNoEqzhrl5HcCBwcLlphj792tvTAth5QkVegTgGfsaw==
|
||||
"@vuepress/shared-utils@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/shared-utils/-/shared-utils-1.2.0.tgz#8d9ab40c24f75f027ef32c2ad0169f0f08e949fa"
|
||||
integrity sha512-wo5Ng2/xzsmIYCzvWxgLFlDBp7FkmJp2shAkbSurLNAh1vixhs0+LyDxsk01+m34ktJSp9rTUUsm6khw/Fvo0w==
|
||||
dependencies:
|
||||
chalk "^2.3.2"
|
||||
diacritics "^1.3.0"
|
||||
@ -939,19 +963,20 @@
|
||||
semver "^6.0.0"
|
||||
upath "^1.1.0"
|
||||
|
||||
"@vuepress/theme-default@^1.1.0":
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/theme-default/-/theme-default-1.1.0.tgz#915c97bb69985d6fccd815f829532d67d828e10a"
|
||||
integrity sha512-U+kFHakSBEXFAdfItyeCbP//q2hm9R8+vnTFjbMMVgRZ2SHPnDUC/7WWGoEUzfEpFHHPrG1OzC9iI/o5v8p5AQ==
|
||||
"@vuepress/theme-default@^1.2.0":
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vuepress/theme-default/-/theme-default-1.2.0.tgz#3303af21a00031a3482ed1c494508234f545cbf1"
|
||||
integrity sha512-mJxAMYQQv4OrGFsArMlONu8RpCzPUVx81dumkyTT4ay5PXAWTj+WDeFQLOT3j0g9QrDJGnHhbiw2aS+R/0WUyQ==
|
||||
dependencies:
|
||||
"@vuepress/plugin-active-header-links" "^1.1.0"
|
||||
"@vuepress/plugin-nprogress" "^1.1.0"
|
||||
"@vuepress/plugin-search" "^1.1.0"
|
||||
"@vuepress/plugin-active-header-links" "^1.2.0"
|
||||
"@vuepress/plugin-nprogress" "^1.2.0"
|
||||
"@vuepress/plugin-search" "^1.2.0"
|
||||
docsearch.js "^2.5.2"
|
||||
lodash "^4.17.15"
|
||||
stylus "^0.54.5"
|
||||
stylus-loader "^3.0.2"
|
||||
vuepress-plugin-container "^2.0.0"
|
||||
vuepress-plugin-container "^2.0.2"
|
||||
vuepress-plugin-smooth-scroll "^0.0.3"
|
||||
|
||||
"@webassemblyjs/ast@1.8.5":
|
||||
version "1.8.5"
|
||||
@@ -1153,9 +1178,9 @@ ajv@^6.1.0, ajv@^6.10.2, ajv@^6.5.5:
    uri-js "^4.2.2"

algoliasearch@^3.24.5:
  version "3.35.0"
  resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-3.35.0.tgz#03f2900698c7c547fce9fb8fb8d0b9a56c8da405"
  integrity sha512-Om4aLzkGbUi+Rc3sa8s48CRj2Qe7u5TXS7lK7Z681x2EiAa5Qx5uB/kbp8A6qY6dFDX7vstYRIYZ7t9XgdJ1dw==
  version "3.35.1"
  resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-3.35.1.tgz#297d15f534a3507cab2f5dfb996019cac7568f0c"
  integrity sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==
  dependencies:
    agentkeepalive "^2.2.0"
    debug "^2.6.9"
@@ -1336,10 +1361,12 @@ async-limiter@~1.0.0:
  resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd"
  integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==

async@^1.5.2:
  version "1.5.2"
  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
  integrity sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=
async@^2.6.2:
  version "2.6.3"
  resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff"
  integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==
  dependencies:
    lodash "^4.17.14"

asynckit@^0.4.0:
  version "0.4.0"
@@ -1359,17 +1386,17 @@ autocomplete.js@0.36.0:
    immediate "^3.2.3"

autoprefixer@^9.4.5, autoprefixer@^9.5.1, autoprefixer@^9.6.1:
  version "9.6.1"
  resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.6.1.tgz#51967a02d2d2300bb01866c1611ec8348d355a47"
  integrity sha512-aVo5WxR3VyvyJxcJC3h4FKfwCQvQWb1tSI5VHNibddCVWrcD1NvlxEweg3TSgiPztMnWfjpy2FURKA2kvDE+Tw==
  version "9.7.0"
  resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.7.0.tgz#905ec19e50f04545fe9ff131182cc9ab25246901"
  integrity sha512-j2IRvaCfrUxIiZun9ba4mhJ2omhw4OY88/yVzLO+lHhGBumAAK72PgM6gkbSN8iregPOn1ZlxGkmZh2CQ7X4AQ==
  dependencies:
    browserslist "^4.6.3"
    caniuse-lite "^1.0.30000980"
    browserslist "^4.7.2"
    caniuse-lite "^1.0.30001004"
    chalk "^2.4.2"
    normalize-range "^0.1.2"
    num2fraction "^1.2.2"
    postcss "^7.0.17"
    postcss-value-parser "^4.0.0"
    postcss "^7.0.19"
    postcss-value-parser "^4.0.2"

aws-sign2@~0.7.0:
  version "0.7.0"
@@ -1381,13 +1408,6 @@ aws4@^1.8.0:
  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f"
  integrity sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==

babel-extract-comments@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/babel-extract-comments/-/babel-extract-comments-1.0.0.tgz#0a2aedf81417ed391b85e18b4614e693a0351a21"
  integrity sha512-qWWzi4TlddohA91bFwgt6zO/J0X+io7Qp184Fw0m2JYRSTZnJbFR8+07KmzudHCZgOiKRCrjhylwv9Xd8gfhVQ==
  dependencies:
    babylon "^6.18.0"

babel-loader@^8.0.4:
  version "8.0.6"
  resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.0.6.tgz#e33bdb6f362b03f4bb141a0c21ab87c501b70dfb"
@@ -1416,32 +1436,6 @@ babel-plugin-module-resolver@3.2.0:
    reselect "^3.0.1"
    resolve "^1.4.0"

babel-plugin-syntax-object-rest-spread@^6.8.0:
  version "6.13.0"
  resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5"
  integrity sha1-/WU28rzhODb/o6VFjEkDpZe7O/U=

babel-plugin-transform-object-rest-spread@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz#0f36692d50fef6b7e2d4b3ac1478137a963b7b06"
  integrity sha1-DzZpLVD+9rfi1LOsFHgTepY7ewY=
  dependencies:
    babel-plugin-syntax-object-rest-spread "^6.8.0"
    babel-runtime "^6.26.0"

babel-runtime@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe"
  integrity sha1-llxwWGaOgrVde/4E/yM3vItWR/4=
  dependencies:
    core-js "^2.4.0"
    regenerator-runtime "^0.11.0"

babylon@^6.18.0:
  version "6.18.0"
  resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3"
  integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==

balanced-match@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767"
@@ -1493,9 +1487,9 @@ binary-extensions@^1.0.0:
  integrity sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==

bluebird@^3.1.1, bluebird@^3.5.5:
  version "3.5.5"
  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.5.tgz#a8d0afd73251effbbd5fe384a77d73003c17a71f"
  integrity sha512-5am6HnnfN+urzt4yfg7IgTbotDjIT/u8AJpEt0sIU9FtXfVeezXAPKswrG+xKUCOYAINpSdgZVDU6QFh+cuH3w==
  version "3.7.1"
  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.1.tgz#df70e302b471d7473489acf26a93d63b53f874de"
  integrity sha512-DdmyoGCleJnkbp3nkbxTLJ18rjDsE4yCggEwKNXkeV123sPNfOCYeDoeuOY+F2FrSjO1YXcTU+dsy96KMy+gcg==

bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0:
  version "4.11.8"
@@ -1623,14 +1617,14 @@ browserify-zlib@^0.2.0:
  dependencies:
    pako "~1.0.5"

browserslist@^4.0.0, browserslist@^4.3.4, browserslist@^4.6.3:
  version "4.7.0"
  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.7.0.tgz#9ee89225ffc07db03409f2fee524dc8227458a17"
  integrity sha512-9rGNDtnj+HaahxiVV38Gn8n8Lr8REKsel68v1sPFfIGEK6uSXTY3h9acgiT1dZVtOOUtifo/Dn8daDQ5dUgVsA==
browserslist@^4.0.0, browserslist@^4.3.4, browserslist@^4.7.2:
  version "4.7.2"
  resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.7.2.tgz#1bb984531a476b5d389cedecb195b2cd69fb1348"
  integrity sha512-uZavT/gZXJd2UTi9Ov7/Z340WOSQ3+m1iBVRUknf+okKxonL9P83S3ctiBDtuRmRu8PiCHjqyueqQ9HYlJhxiw==
  dependencies:
    caniuse-lite "^1.0.30000989"
    electron-to-chromium "^1.3.247"
    node-releases "^1.1.29"
    caniuse-lite "^1.0.30001004"
    electron-to-chromium "^1.3.295"
    node-releases "^1.1.38"

buffer-from@^1.0.0:
  version "1.1.1"
@@ -1801,10 +1795,10 @@ caniuse-api@^3.0.0:
    lodash.memoize "^4.1.2"
    lodash.uniq "^4.5.0"

caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000980, caniuse-lite@^1.0.30000989:
  version "1.0.30000997"
  resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000997.tgz#ba44a606804f8680894b7042612c2c7f65685b7e"
  integrity sha512-BQLFPIdj2ntgBNWp9Q64LGUIEmvhKkzzHhUHR3CD5A9Lb7ZKF20/+sgadhFap69lk5XmK1fTUleDclaRFvgVUA==
caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001004:
  version "1.0.30001006"
  resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001006.tgz#5b6e8288792cfa275f007b2819a00ccad7112655"
  integrity sha512-MXnUVX27aGs/QINz+QG1sWSLDr3P1A3Hq5EUWoIt0T7K24DuvMxZEnh3Y5aHlJW6Bz2aApJdSewdYLd8zQnUuw==

caseless@~0.12.0:
  version "0.12.0"
@@ -1987,20 +1981,15 @@ commander@2.17.x:
  integrity sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg==

commander@^2.20.0:
  version "2.20.1"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.1.tgz#3863ce3ca92d0831dcf2a102f5fb4b5926afd0f9"
  integrity sha512-cCuLsMhJeWQ/ZpsFTbE765kvVfoeSddc4nU3up4fV+fDBcfUXnbITJ+JzhkdjzOqhURjZgujxaioam4RM9yGUg==
  version "2.20.3"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
  integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==

commander@~2.19.0:
  version "2.19.0"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.19.0.tgz#f6198aa84e5b83c46054b94ddedbfed5ee9ff12a"
  integrity sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg==

comment-regex@^1.0.0:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/comment-regex/-/comment-regex-1.0.1.tgz#e070d2c4db33231955d0979d27c918fcb6f93565"
  integrity sha512-IWlN//Yfby92tOIje7J18HkNmWRR7JESA/BK8W7wqY/akITpU5B0JQWnbTjCfdChSrDNb0DrdA9jfAxiiBXyiQ==

commondir@^1.0.1:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
@@ -2057,11 +2046,9 @@ consola@^2.6.0:
  integrity sha512-4sxpH6SGFYLADfUip4vuY65f/gEogrzJoniVhNUYkJHtng0l8ZjnDCqxxrSVRHOHwKxsy8Vm5ONZh1wOR3/l/w==

console-browserify@^1.1.0:
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
  integrity sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA=
  dependencies:
    date-now "^0.1.4"
  version "1.2.0"
  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.2.0.tgz#67063cef57ceb6cf4993a2ab3a55840ae8c49336"
  integrity sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA==

console-control-strings@^1.0.0, console-control-strings@~1.1.0:
  version "1.1.0"
@@ -2144,10 +2131,10 @@ copy-webpack-plugin@^5.0.2:
    serialize-javascript "^1.7.0"
    webpack-log "^2.0.0"

core-js@^2.4.0, core-js@^2.6.5:
  version "2.6.9"
  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.9.tgz#6b4b214620c834152e179323727fc19741b084f2"
  integrity sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A==
core-js@^2.6.5:
  version "2.6.10"
  resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.10.tgz#8a5b8391f8cc7013da703411ce5b585706300d7f"
  integrity sha512-I39t74+4t+zau64EN1fE5v2W31Adtc/REhzWN+gWRRXg6WH5qAsZm62DHpQ1+Yhe4047T55jvzz7MUqF/dBBlA==

core-util-is@1.0.2, core-util-is@~1.0.0:
  version "1.0.2"
@@ -2285,21 +2272,13 @@ css-select@^2.0.0:
    domutils "^1.7.0"
    nth-check "^1.0.2"

css-tree@1.0.0-alpha.29:
  version "1.0.0-alpha.29"
  resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.29.tgz#3fa9d4ef3142cbd1c301e7664c1f352bd82f5a39"
  integrity sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg==
  dependencies:
    mdn-data "~1.1.0"
    source-map "^0.5.3"

css-tree@1.0.0-alpha.33:
  version "1.0.0-alpha.33"
  resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.33.tgz#970e20e5a91f7a378ddd0fc58d0b6c8d4f3be93e"
  integrity sha512-SPt57bh5nQnpsTBsx/IXbO14sRc9xXu5MtMAVuo0BaQQmyf0NupNPPSoMaqiAF5tDFafYsTkfeH4Q/HCKXkg4w==
css-tree@1.0.0-alpha.37:
  version "1.0.0-alpha.37"
  resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.37.tgz#98bebd62c4c1d9f960ec340cf9f7522e30709a22"
  integrity sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==
  dependencies:
    mdn-data "2.0.4"
    source-map "^0.5.3"
    source-map "^0.6.1"

css-unit-converter@^1.1.1:
  version "1.1.1"
@@ -2311,11 +2290,6 @@ css-what@2.1, css-what@^2.1.2:
  resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2"
  integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg==

css.escape@^1.5.1:
  version "1.5.1"
  resolved "https://registry.yarnpkg.com/css.escape/-/css.escape-1.5.1.tgz#42e27d4fa04ae32f931a4b4d4191fa9cddee97cb"
  integrity sha1-QuJ9T6BK4y+TGktNQZH6nN3ul8s=

css@^2.0.0:
  version "2.2.4"
  resolved "https://registry.yarnpkg.com/css/-/css-2.2.4.tgz#c646755c73971f2bba6a601e2cf2fd71b1298929"
@@ -2404,12 +2378,12 @@ cssnano@^4.1.10:
    is-resolvable "^1.0.0"
    postcss "^7.0.0"

csso@^3.5.1:
  version "3.5.1"
  resolved "https://registry.yarnpkg.com/csso/-/csso-3.5.1.tgz#7b9eb8be61628973c1b261e169d2f024008e758b"
  integrity sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg==
csso@^4.0.2:
  version "4.0.2"
  resolved "https://registry.yarnpkg.com/csso/-/csso-4.0.2.tgz#e5f81ab3a56b8eefb7f0092ce7279329f454de3d"
  integrity sha512-kS7/oeNVXkHWxby5tHVxlhjizRCSv8QdU7hB2FpdAibDU8FjTAolhNjKNTiLzXtUrKT6HwClE81yXwEk1309wg==
  dependencies:
    css-tree "1.0.0-alpha.29"
    css-tree "1.0.0-alpha.37"

cyclist@^1.0.1:
  version "1.0.1"
@@ -2423,11 +2397,6 @@ dashdash@^1.12.0:
  dependencies:
    assert-plus "^1.0.0"

date-now@^0.1.4:
  version "0.1.4"
  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
  integrity sha1-6vQ5/U1ISK105cx9vvIAZyueNFs=

de-indent@^1.0.2:
  version "1.0.2"
  resolved "https://registry.yarnpkg.com/de-indent/-/de-indent-1.0.2.tgz#b2038e846dc33baa5796128d0804b455b8c1e21d"
@@ -2440,7 +2409,7 @@ debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.9:
  dependencies:
    ms "2.0.0"

debug@^3.0.0, debug@^3.2.5, debug@^3.2.6:
debug@^3.0.0, debug@^3.1.1, debug@^3.2.5, debug@^3.2.6:
  version "3.2.6"
  resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b"
  integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==
@@ -2530,11 +2499,6 @@ define-property@^2.0.2:
    is-descriptor "^1.0.2"
    isobject "^3.0.1"

defined@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
  integrity sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=

del@^4.1.1:
  version "4.1.1"
  resolved "https://registry.yarnpkg.com/del/-/del-4.1.1.tgz#9e8f117222ea44a31ff3a156c049b99052a9f0b4"
@@ -2733,10 +2697,10 @@ ee-first@1.1.1:
  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
  integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=

electron-to-chromium@^1.3.247:
  version "1.3.268"
  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.268.tgz#d18f10b064dd0fce39098704896309a8249cb62f"
  integrity sha512-QkPEya233zGh+1erw/N/GNgLjs+t65wkGX4Yw0X/ZuO75r+4Ropk7toXSUqP3TQ7EIwBDotTks3rbNZ1Kwz8hA==
electron-to-chromium@^1.3.295:
  version "1.3.296"
  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.296.tgz#a1d4322d742317945285d3ba88966561b67f3ac8"
  integrity sha512-s5hv+TSJSVRsxH190De66YHb50pBGTweT9XGWYu/LMR20KX6TsjFzObo36CjVAzM+PUeeKSBRtm/mISlCzeojQ==

elliptic@^6.0.0:
  version "6.5.1"
@@ -2774,12 +2738,12 @@ end-of-stream@^1.0.0, end-of-stream@^1.1.0:
    once "^1.4.0"

enhanced-resolve@^4.1.0:
  version "4.1.0"
  resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz#41c7e0bfdfe74ac1ffe1e57ad6a5c6c9f3742a7f"
  integrity sha512-F/7vkyTtyc/llOIn8oWclcB25KdRaiPBpZYDgJHgh/UHtpgT2p2eldQgtQnLtUvfMKPKxbRaQM/hHkvLHt1Vng==
  version "4.1.1"
  resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz#2937e2b8066cd0fe7ce0990a98f0d71a35189f66"
  integrity sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==
  dependencies:
    graceful-fs "^4.1.2"
    memory-fs "^0.4.0"
    memory-fs "^0.5.0"
    tapable "^1.0.0"

entities@^1.1.1, entities@~1.1.1:
@@ -2820,9 +2784,9 @@ error-ex@^1.3.1:
    is-arrayish "^0.2.1"

es-abstract@^1.12.0, es-abstract@^1.5.1:
  version "1.14.2"
  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.14.2.tgz#7ce108fad83068c8783c3cdf62e504e084d8c497"
  integrity sha512-DgoQmbpFNOofkjJtKwr87Ma5EW4Dc8fWhD0R+ndq7Oc456ivUfGOOP6oAZTTKl5/CcNMP+EN+e3/iUzgE0veZg==
  version "1.16.0"
  resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.16.0.tgz#d3a26dc9c3283ac9750dca569586e976d9dcc06d"
  integrity sha512-xdQnfykZ9JMEiasTAJZJdMWCQ1Vm00NBw79/AWi7ELfZuuPCSOMDZbT9mkOfSctVtfhb+sAAzrm+j//GjjLHLg==
  dependencies:
    es-to-primitive "^1.2.0"
    function-bind "^1.1.1"
@@ -2832,8 +2796,8 @@ es-abstract@^1.12.0, es-abstract@^1.5.1:
    is-regex "^1.0.4"
    object-inspect "^1.6.0"
    object-keys "^1.1.1"
    string.prototype.trimleft "^2.0.0"
    string.prototype.trimright "^2.0.0"
    string.prototype.trimleft "^2.1.0"
    string.prototype.trimright "^2.1.0"

es-to-primitive@^1.2.0:
  version "1.2.0"
@@ -3072,9 +3036,9 @@ figgy-pudding@^3.5.1:
  integrity sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==

figures@^3.0.0:
  version "3.0.0"
  resolved "https://registry.yarnpkg.com/figures/-/figures-3.0.0.tgz#756275c964646163cc6f9197c7a0295dbfd04de9"
  integrity sha512-HKri+WoWoUgr83pehn/SIgLOMZ9nAWC6dcGj26RY2R4F50u4+RTUz0RCrUlOV3nKRAICW1UGzyb+kcX2qK1S/g==
  version "3.1.0"
  resolved "https://registry.yarnpkg.com/figures/-/figures-3.1.0.tgz#4b198dd07d8d71530642864af2d45dd9e459c4ec"
  integrity sha512-ravh8VRXqHuMvZt/d8GblBeqDMkdJMBdv/2KntFH+ra5MXkO7nxNKpzQ3n6QD/2da1kH0aWmNISdvhM7gl2gVg==
  dependencies:
    escape-string-regexp "^1.0.5"

@@ -3204,15 +3168,6 @@ from2@^2.1.0:
    inherits "^2.0.1"
    readable-stream "^2.0.0"

fs-extra@^4.0.2:
  version "4.0.3"
  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94"
  integrity sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg==
  dependencies:
    graceful-fs "^4.1.2"
    jsonfile "^4.0.0"
    universalify "^0.1.0"

fs-extra@^7.0.1:
  version "7.0.1"
  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9"
@@ -3266,11 +3221,6 @@ function-bind@^1.1.1:
  resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
  integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==

gather-stream@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/gather-stream/-/gather-stream-1.0.0.tgz#b33994af457a8115700d410f317733cbe7a0904b"
  integrity sha1-szmUr0V6gRVwDUEPMXczy+egkEs=

gauge@~2.7.3:
  version "2.7.4"
  resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
@@ -3328,9 +3278,9 @@ glob-to-regexp@^0.3.0:
  integrity sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=

glob@^7.0.3, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4:
  version "7.1.4"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255"
  integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==
  version "7.1.5"
  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.5.tgz#6714c69bee20f3c3e64c4dd905553e532b40cdc0"
  integrity sha512-J9dlskqUXK1OeTOYBEn5s8aMukWMwWfs+rPTn/jn50Ux4MNXVhubL1wu/j2t+H4NVI+cXEcCaYellqaPVGXNqQ==
  dependencies:
    fs.realpath "^1.0.0"
    inflight "^1.0.4"
@@ -3397,9 +3347,9 @@ good-listener@^1.2.2:
    delegate "^3.1.2"

graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0:
  version "4.2.2"
  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.2.tgz#6f0952605d0140c1cfdb138ed005775b92d67b02"
  integrity sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q==
  version "4.2.3"
  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.3.tgz#4a12ff1b60376ef09862c2093edd908328be8423"
  integrity sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==

gray-matter@^4.0.1:
  version "4.0.2"
@@ -3436,11 +3386,6 @@ has-ansi@^2.0.0:
  dependencies:
    ansi-regex "^2.0.0"

has-flag@^1.0.0:
  version "1.0.0"
  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa"
  integrity sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=

has-flag@^3.0.0:
  version "3.0.0"
  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
@@ -3644,7 +3589,7 @@ http-errors@~1.7.2:
  resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.4.10.tgz#92c9c1374c35085f75db359ec56cc257cbb93fa4"
  integrity sha1-ksnBN0w1CF912zWexWzCV8u5P6Q=

http-proxy-middleware@^0.19.1:
http-proxy-middleware@0.19.1:
  version "0.19.1"
  resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz#183c7dc4aa1479150306498c210cdaf96080a43a"
  integrity sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==
@@ -3707,9 +3652,9 @@ iferr@^0.1.5:
  integrity sha1-xg7taebY/bazEEofy8ocGS3FtQE=

ignore-walk@^3.0.1:
  version "3.0.2"
  resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.2.tgz#99d83a246c196ea5c93ef9315ad7b0819c35069b"
  integrity sha512-EXyErtpHbn75ZTsOADsfx6J/FPo6/5cjev46PXrcTpd8z3BoRkXgYu9/JVqrI7tusjmwCZutGeRJeU0Wo1e4Cw==
  version "3.0.3"
  resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.3.tgz#017e2447184bfeade7c238e4aefdd1e8f95b1e37"
  integrity sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw==
  dependencies:
    minimatch "^3.0.4"

@@ -3846,7 +3791,7 @@ is-absolute-url@^2.0.0:
  resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6"
  integrity sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=

is-absolute-url@^3.0.2:
is-absolute-url@^3.0.3:
  version "3.0.3"
  resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-3.0.3.tgz#96c6a22b6a23929b11ea0afb1836c36ad4a5d698"
  integrity sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==
@@ -4120,11 +4065,6 @@ javascript-stringify@^1.6.0:
  resolved "https://registry.yarnpkg.com/javascript-stringify/-/javascript-stringify-1.6.0.tgz#142d111f3a6e3dae8f4a9afd77d45855b5a9cce3"
  integrity sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=

js-base64@^2.1.9:
  version "2.5.1"
  resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.5.1.tgz#1efa39ef2c5f7980bb1784ade4a8af2de3291121"
  integrity sha512-M7kLczedRMYX4L8Mdh4MzyAMM9O5osx+4FcOQuTvr3A9F2D9S5JXheN0ewNbrvK2UatkTRhL5ejGmGSjNMiZuw==

js-levenshtein@^1.1.3:
  version "1.1.6"
  resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d"
@@ -4196,9 +4136,9 @@ json5@^1.0.1:
    minimist "^1.2.0"

json5@^2.1.0:
  version "2.1.0"
  resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.0.tgz#e7a0c62c48285c628d20a10b85c89bb807c32850"
  integrity sha512-8Mh9h6xViijj36g7Dxi+Y4S6hNGV96vcJZr/SrlHh1LR/pEn/8j/+qIBbs44YKl69Lrfctp4QD+AdWLTMqEZAQ==
  version "2.1.1"
  resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.1.tgz#81b6cb04e9ba496f1c7005d07b4368a2638f90b6"
  integrity sha512-l+3HXD0GEI3huGq1njuqtzYK8OYJyXMkOLtQ53pjWh89tvWS2h6l+1zMkYWqlb57+SiQodKZyvMEFb2X+KrFhQ==
  dependencies:
    minimist "^1.2.0"

@@ -4325,6 +4265,11 @@ lodash.clonedeep@^4.5.0:
  resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef"
  integrity sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=

lodash.debounce@^4.0.8:
  version "4.0.8"
  resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
  integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168=

lodash.kebabcase@^4.1.1:
  version "4.1.1"
  resolved "https://registry.yarnpkg.com/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz#8489b1cb0d29ff88195cceca448ff6d6cc295c36"
@@ -4350,11 +4295,6 @@ lodash.templatesettings@^4.0.0:
  dependencies:
    lodash._reinterpolate "^3.0.0"

lodash.throttle@^4.1.1:
  version "4.1.1"
  resolved "https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4"
  integrity sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=

lodash.toarray@^4.4.0:
  version "4.4.0"
  resolved "https://registry.yarnpkg.com/lodash.toarray/-/lodash.toarray-4.4.0.tgz#24c4bfcd6b2fba38bfd0594db1179d8e9b656561"
@@ -4365,7 +4305,7 @@ lodash.uniq@^4.5.0:
  resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
  integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=

lodash@^4.17.11, lodash@^4.17.13, lodash@^4.17.15, lodash@^4.17.3, lodash@^4.17.5:
lodash@^4.17.11, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.3, lodash@^4.17.5:
  version "4.17.15"
  resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548"
  integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==
@@ -4435,9 +4375,9 @@ map-visit@^1.0.0:
    object-visit "^1.0.0"

markdown-it-anchor@^5.0.2:
  version "5.2.4"
  resolved "https://registry.yarnpkg.com/markdown-it-anchor/-/markdown-it-anchor-5.2.4.tgz#d39306fe4c199705b4479d3036842cf34dcba24f"
  integrity sha512-n8zCGjxA3T+Mx1pG8HEgbJbkB8JFUuRkeTZQuIM8iPY6oQ8sWOPRZJDFC9a/pNg2QkHEjjGkhBEl/RSyzaDZ3A==
  version "5.2.5"
  resolved "https://registry.yarnpkg.com/markdown-it-anchor/-/markdown-it-anchor-5.2.5.tgz#dbf13cfcdbffd16a510984f1263e1d479a47d27a"
  integrity sha512-xLIjLQmtym3QpoY9llBgApknl7pxAcN3WDRc2d3rwpl+/YvDZHPmKscGs+L6E05xf2KrCXPBvosWt7MZukwSpQ==

markdown-it-chain@^1.3.0:
  version "1.3.0"
@@ -4486,11 +4426,6 @@ mdn-data@2.0.4:
  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.4.tgz#699b3c38ac6f1d728091a64650b65d388502fd5b"
  integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA==

mdn-data@~1.1.0:
  version "1.1.4"
  resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-1.1.4.tgz#50b5d4ffc4575276573c4eedb8780812a8419f01"
  integrity sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA==

mdurl@^1.0.1:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e"
@@ -4510,7 +4445,7 @@ mem@^4.0.0:
    mimic-fn "^2.0.0"
    p-is-promise "^2.0.0"

memory-fs@^0.4.0, memory-fs@^0.4.1:
memory-fs@^0.4.1:
  version "0.4.1"
  resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552"
  integrity sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=
@@ -4518,6 +4453,14 @@ memory-fs@^0.4.0, memory-fs@^0.4.1:
    errno "^0.1.3"
    readable-stream "^2.0.1"

memory-fs@^0.5.0:
  version "0.5.0"
  resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.5.0.tgz#324c01288b88652966d161db77838720845a8e3c"
  integrity sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==
  dependencies:
    errno "^0.1.3"
    readable-stream "^2.0.1"

merge-descriptors@1.0.1:
  version "1.0.1"
  resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
@@ -4643,15 +4586,7 @@ minimist@^1.2.0:
  resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284"
  integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=

minipass@^2.6.0, minipass@^2.8.6:
  version "2.8.6"
  resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.8.6.tgz#620d889ace26356391d010ecb9458749df9b6db5"
  integrity sha512-lFG7d6g3+/UaFDCOtqPiKAC9zngWWsQZl1g5q6gaONqrjq61SX2xFqXMleQiFVyDpYwa018E9hmlAFY22PCb+A==
  dependencies:
    safe-buffer "^5.1.2"
    yallist "^3.0.0"

minipass@^2.9.0:
minipass@^2.6.0, minipass@^2.8.6, minipass@^2.9.0:
  version "2.9.0"
  resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6"
  integrity sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==
@@ -4660,9 +4595,9 @@
    yallist "^3.0.0"

minizlib@^1.2.1:
  version "1.3.2"
  resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.2.tgz#5d24764998f98112586f7e566bd4c0999769dad4"
  integrity sha512-lsNFqSHdJ21EwKzCp12HHJGxSMtHkCW1EMA9cceG3MkMNARjuWotZnMe3NKNshAvFXpm4loZqmYsCmRwhS2JMw==
  version "1.3.3"
  resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d"
  integrity sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==
  dependencies:
    minipass "^2.9.0"

@@ -4695,7 +4630,7 @@ mkdirp@0.3.0:
  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e"
  integrity sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=

mkdirp@0.5.x, mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.1, mkdirp@~0.5.x:
mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.1, mkdirp@~0.5.x:
  version "0.5.1"
  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
  integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=
@@ -4802,10 +4737,10 @@ node-emoji@^1.8.1:
  dependencies:
    lodash.toarray "^4.4.0"

node-forge@0.8.2:
  version "0.8.2"
  resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.8.2.tgz#b4bcc59fb12ce77a8825fc6a783dfe3182499c5a"
  integrity sha512-mXQ9GBq1N3uDCyV1pdSzgIguwgtVpM7f5/5J4ipz12PKWElmPpVWLDuWl8iXmhysr21+WmX/OJ5UKx82wjomgg==
node-forge@0.9.0:
  version "0.9.0"
  resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.9.0.tgz#d624050edbb44874adca12bb9a52ec63cb782579"
  integrity sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ==

node-libs-browser@^2.2.1:
  version "2.2.1"
@@ -4852,12 +4787,12 @@ node-pre-gyp@^0.12.0:
    semver "^5.3.0"
    tar "^4"

node-releases@^1.1.29:
  version "1.1.32"
  resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.32.tgz#485b35c1bf9b4d8baa105d782f8ca731e518276e"
  integrity sha512-VhVknkitq8dqtWoluagsGPn3dxTvN9fwgR59fV3D7sLBHe0JfDramsMI8n8mY//ccq/Kkrf8ZRHRpsyVZ3qw1A==
node-releases@^1.1.38:
  version "1.1.39"
  resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.39.tgz#c1011f30343aff5b633153b10ff691d278d08e8d"
  integrity sha512-8MRC/ErwNCHOlAFycy9OPca46fQYUjbJRDcZTHVWIGXIjYLM73k70vv3WkYutVnM4cCo4hE0MqBVVZjP6vjISA==
  dependencies:
    semver "^5.3.0"
    semver "^6.3.0"

nopt@1.0.10:
  version "1.0.10"
@@ -4916,9 +4851,9 @@ npm-bundled@^1.0.1:
  integrity sha512-8/JCaftHwbd//k6y2rEWp6k1wxVfpFzB6t1p825+cUb7Ym2XQfhwIC5KwhrvzZRJu+LtDE585zVaS32+CGtf0g==

npm-packlist@^1.1.6:
  version "1.4.4"
  resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.4.tgz#866224233850ac534b63d1a6e76050092b5d2f44"
  integrity sha512-zTLo8UcVYtDU3gdeaFu2Xu0n0EvelfHDGuqtNIn5RO7yQj4H1TqNdBc/yZjxnWA0PVB8D3Woyp0i5B43JwQ6Vw==
  version "1.4.6"
  resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.6.tgz#53ba3ed11f8523079f1457376dd379ee4ea42ff4"
  integrity sha512-u65uQdb+qwtGvEJh/DgQgW1Xg7sqeNbmxYyrvlNznaVTjV3E5P6F/EFjM+BVHXl7JJlsdG8A64M0XI8FI/IOlg==
  dependencies:
    ignore-walk "^3.0.1"
    npm-bundled "^1.0.1"
@@ -5295,22 +5230,6 @@ pbkdf2@^3.0.3:
    safe-buffer "^5.0.1"
    sha.js "^2.4.8"

perfectionist@^2.4.0:
  version "2.4.0"
  resolved "https://registry.yarnpkg.com/perfectionist/-/perfectionist-2.4.0.tgz#c147ad3714e126467f1764129ee72df861d47ea0"
  integrity sha1-wUetNxThJkZ/F2QSnuct+GHUfqA=
  dependencies:
    comment-regex "^1.0.0"
    defined "^1.0.0"
    minimist "^1.2.0"
    postcss "^5.0.8"
    postcss-scss "^0.3.0"
    postcss-value-parser "^3.3.0"
    read-file-stdin "^0.2.0"
    string.prototype.repeat "^0.2.0"
    vendors "^1.0.0"
    write-file-stdout "0.0.2"

performance-now@^2.1.0:
  version "2.1.0"
  resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b"
@@ -5357,14 +5276,14 @@ pkg-up@^2.0.0:
  dependencies:
    find-up "^2.1.0"

portfinder@^1.0.13, portfinder@^1.0.24:
  version "1.0.24"
  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.24.tgz#11efbc6865f12f37624b6531ead1d809ed965cfa"
  integrity sha512-ekRl7zD2qxYndYflwiryJwMioBI7LI7rVXg3EnLK3sjkouT5eOuhS3gS255XxBksa30VG8UPZYZCdgfGOfkSUg==
portfinder@^1.0.13, portfinder@^1.0.25:
  version "1.0.25"
  resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.25.tgz#254fd337ffba869f4b9d37edc298059cb4d35eca"
  integrity sha512-6ElJnHBbxVA1XSLgBp7G1FiCkQdlqGzuF7DswL5tcea+E8UpuvPU7beVAjjRwCioTS9ZluNbu+ZyRvgTsmqEBg==
  dependencies:
    async "^1.5.2"
    debug "^2.2.0"
    mkdirp "0.5.x"
    async "^2.6.2"
    debug "^3.1.1"
    mkdirp "^0.5.1"

posix-character-classes@^0.1.0:
  version "0.1.1"
@@ -5693,13 +5612,6 @@ postcss-safe-parser@^4.0.1:
  dependencies:
    postcss "^7.0.0"

postcss-scss@^0.3.0:
  version "0.3.1"
  resolved "https://registry.yarnpkg.com/postcss-scss/-/postcss-scss-0.3.1.tgz#65c610d8e2a7ee0e62b1835b71b8870734816e4b"
  integrity sha1-ZcYQ2OKn7g5isYNbcbiHBzSBbks=
  dependencies:
    postcss "^5.2.4"

postcss-selector-parser@^3.0.0:
  version "3.1.1"
  resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz#4f875f4afb0c96573d5cf4d74011aee250a7e865"
@@ -5751,21 +5663,11 @@ postcss-value-parser@^3.0.0, postcss-value-parser@^3.2.3, postcss-value-parser@^
  resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281"
  integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==

postcss-value-parser@^4.0.0:
postcss-value-parser@^4.0.2:
  version "4.0.2"
  resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz#482282c09a42706d1fc9a069b73f44ec08391dc9"
  integrity sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ==

postcss@^5.0.8, postcss@^5.2.4:
  version "5.2.18"
  resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.18.tgz#badfa1497d46244f6390f58b319830d9107853c5"
  integrity sha512-zrUjRRe1bpXKsX1qAJNJjqZViErVuyEkMTRrwu4ud4sbTtIBRmtaYDrHmcGgmrbsW3MHfmtIf+vJumgQn+PrXg==
  dependencies:
    chalk "^1.1.3"
    js-base64 "^2.1.9"
    source-map "^0.5.6"
    supports-color "^3.2.3"

postcss@^6.0.9:
  version "6.0.23"
  resolved "https://registry.yarnpkg.com/postcss/-/postcss-6.0.23.tgz#61c82cc328ac60e677645f979054eb98bc0e3324"
@@ -5775,10 +5677,10 @@ postcss@^6.0.9:
    source-map "^0.6.1"
    supports-color "^5.4.0"

postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.11, postcss@^7.0.14, postcss@^7.0.17, postcss@^7.0.18, postcss@^7.0.5, postcss@^7.0.6:
  version "7.0.18"
  resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.18.tgz#4b9cda95ae6c069c67a4d933029eddd4838ac233"
  integrity sha512-/7g1QXXgegpF+9GJj4iN7ChGF40sYuGYJ8WZu8DZWnmhQ/G36hfdk3q9LBJmoK+lZ+yzZ5KYpOoxq7LF1BxE8g==
postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.11, postcss@^7.0.14, postcss@^7.0.18, postcss@^7.0.19, postcss@^7.0.5, postcss@^7.0.6:
  version "7.0.21"
  resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.21.tgz#06bb07824c19c2021c5d056d5b10c35b989f7e17"
  integrity sha512-uIFtJElxJo29QC753JzhidoAhvp/e/Exezkdhfmt8AymWT6/5B7W1WmponYWkHk2eg6sONyTch0A3nkMPun3SQ==
  dependencies:
    chalk "^2.4.2"
    source-map "^0.6.1"
@@ -5915,9 +5817,9 @@ punycode@^2.1.0:
  integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==

purgecss@^1.4.0:
  version "1.4.0"
  resolved "https://registry.yarnpkg.com/purgecss/-/purgecss-1.4.0.tgz#79905624ec1c6c8e1f03044bca92dd8a598ba429"
  integrity sha512-or7/16i7O6DH+NpXqY8NCcWCc940O6PxOgjWAcMTElzgccKOJua1/n6JVtM8UYqoMMWoCyKk+CbLpo4+4mY3BQ==
  version "1.4.1"
  resolved "https://registry.yarnpkg.com/purgecss/-/purgecss-1.4.1.tgz#d362e63eb1ed9dd1fa1554b9fd7accb8d54e56dc"
  integrity sha512-5jONV/D/3nfa+lC425+LA+OWe5/LDn4a79cac+TnzJq3VczwnWlpIDdW275hHsGhkzIlqATQsYFLW7or0cSwNQ==
  dependencies:
    glob "^7.1.3"
    postcss "^7.0.14"
@@ -6010,13 +5912,6 @@ read-cache@^1.0.0:
  dependencies:
    pify "^2.3.0"

read-file-stdin@^0.2.0:
  version "0.2.1"
  resolved "https://registry.yarnpkg.com/read-file-stdin/-/read-file-stdin-0.2.1.tgz#25eccff3a153b6809afacb23ee15387db9e0ee61"
  integrity sha1-JezP86FTtoCa+ssj7hU4fbng7mE=
  dependencies:
    gather-stream "^1.0.0"

"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6, readable-stream@~2.3.6:
  version "2.3.6"
  resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
@@ -6049,9 +5944,9 @@ readdirp@^2.2.1:
    readable-stream "^2.0.2"

reduce-css-calc@^2.1.6:
  version "2.1.6"
  resolved "https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-2.1.6.tgz#050fe6ee7d98a1d70775d2e93ce0b713cee394d2"
  integrity sha512-+l5/qlQmdsbM9h6JerJ/y5vR5Ci0k93aszLNpCmbadC3nBcbRGmIBm0s9Nj59i22LvCjTGftWzdQRwdknayxhw==
  version "2.1.7"
  resolved "https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-2.1.7.tgz#1ace2e02c286d78abcd01fd92bfe8097ab0602c2"
|
||||
integrity sha512-fDnlZ+AybAS3C7Q9xDq5y8A2z+lT63zLbynew/lur/IR24OQF5x98tfNwf79mzEdfywZ0a2wpM860FhFfMxZlA==
|
||||
dependencies:
|
||||
css-unit-converter "^1.1.1"
|
||||
postcss-value-parser "^3.3.0"
|
||||
@ -6075,11 +5970,6 @@ regenerate@^1.4.0:
|
||||
resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11"
|
||||
integrity sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==
|
||||
|
||||
regenerator-runtime@^0.11.0:
|
||||
version "0.11.1"
|
||||
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9"
|
||||
integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==
|
||||
|
||||
regenerator-runtime@^0.13.2:
|
||||
version "0.13.3"
|
||||
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz#7cf6a77d8f5c6f60eb73c5fc1955b2ceb01e6bf5"
|
||||
@ -6120,9 +6010,9 @@ regexpu-core@^4.6.0:
|
||||
unicode-match-property-value-ecmascript "^1.1.0"
|
||||
|
||||
regjsgen@^0.5.0:
|
||||
version "0.5.0"
|
||||
resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.0.tgz#a7634dc08f89209c2049adda3525711fb97265dd"
|
||||
integrity sha512-RnIrLhrXCX5ow/E5/Mh2O4e/oa1/jW0eaBKTSy3LaCj+M3Bqvm97GWDp2yUtzIs4LEn65zR2yiYGFqb2ApnzDA==
|
||||
version "0.5.1"
|
||||
resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.1.tgz#48f0bf1a5ea205196929c0d9798b42d1ed98443c"
|
||||
integrity sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==
|
||||
|
||||
regjsparser@^0.6.0:
|
||||
version "0.6.0"
|
||||
@ -6333,12 +6223,12 @@ select@^1.1.2:
|
||||
resolved "https://registry.yarnpkg.com/select/-/select-1.1.2.tgz#0e7350acdec80b1108528786ec1d4418d11b396d"
|
||||
integrity sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=
|
||||
|
||||
selfsigned@^1.10.6:
|
||||
version "1.10.6"
|
||||
resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.6.tgz#7b3cd37ed9c2034261a173af1a1aae27d8169b67"
|
||||
integrity sha512-i3+CeqxL7DpAazgVpAGdKMwHuL63B5nhJMh9NQ7xmChGkA3jNFflq6Jyo1LLJYcr3idWiNOPWHCrm4zMayLG4w==
|
||||
selfsigned@^1.10.7:
|
||||
version "1.10.7"
|
||||
resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.7.tgz#da5819fd049d5574f28e88a9bcc6dbc6e6f3906b"
|
||||
integrity sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA==
|
||||
dependencies:
|
||||
node-forge "0.8.2"
|
||||
node-forge "0.9.0"
|
||||
|
||||
semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0:
|
||||
version "5.7.1"
|
||||
@ -6469,6 +6359,11 @@ slash@^2.0.0:
|
||||
resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44"
|
||||
integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==
|
||||
|
||||
smoothscroll-polyfill@^0.4.3:
|
||||
version "0.4.4"
|
||||
resolved "https://registry.yarnpkg.com/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz#3a259131dc6930e6ca80003e1cb03b603b69abf8"
|
||||
integrity sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg==
|
||||
|
||||
snapdragon-node@^2.0.1:
|
||||
version "2.1.1"
|
||||
resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b"
|
||||
@ -6543,9 +6438,9 @@ source-map-resolve@^0.5.0, source-map-resolve@^0.5.2:
|
||||
urix "^0.1.0"
|
||||
|
||||
source-map-support@~0.5.12:
|
||||
version "0.5.13"
|
||||
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932"
|
||||
integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==
|
||||
version "0.5.16"
|
||||
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.16.tgz#0ae069e7fe3ba7538c64c98515e35339eac5a042"
|
||||
integrity sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==
|
||||
dependencies:
|
||||
buffer-from "^1.0.0"
|
||||
source-map "^0.6.0"
|
||||
@ -6560,7 +6455,7 @@ source-map@0.5.6:
|
||||
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412"
|
||||
integrity sha1-dc449SvwczxafwwRjYEzSiu19BI=
|
||||
|
||||
source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6:
|
||||
source-map@^0.5.0, source-map@^0.5.6:
|
||||
version "0.5.7"
|
||||
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc"
|
||||
integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=
|
||||
@ -6725,12 +6620,7 @@ string-width@^3.0.0, string-width@^3.1.0:
|
||||
is-fullwidth-code-point "^2.0.0"
|
||||
strip-ansi "^5.1.0"
|
||||
|
||||
string.prototype.repeat@^0.2.0:
|
||||
version "0.2.0"
|
||||
resolved "https://registry.yarnpkg.com/string.prototype.repeat/-/string.prototype.repeat-0.2.0.tgz#aba36de08dcee6a5a337d49b2ea1da1b28fc0ecf"
|
||||
integrity sha1-q6Nt4I3O5qWjN9SbLqHaGyj8Ds8=
|
||||
|
||||
string.prototype.trimleft@^2.0.0:
|
||||
string.prototype.trimleft@^2.1.0:
|
||||
version "2.1.0"
|
||||
resolved "https://registry.yarnpkg.com/string.prototype.trimleft/-/string.prototype.trimleft-2.1.0.tgz#6cc47f0d7eb8d62b0f3701611715a3954591d634"
|
||||
integrity sha512-FJ6b7EgdKxxbDxc79cOlok6Afd++TTs5szo+zJTUyow3ycrRfJVE2pq3vcN53XexvKZu/DJMDfeI/qMiZTrjTw==
|
||||
@ -6738,7 +6628,7 @@ string.prototype.trimleft@^2.0.0:
|
||||
define-properties "^1.1.3"
|
||||
function-bind "^1.1.1"
|
||||
|
||||
string.prototype.trimright@^2.0.0:
|
||||
string.prototype.trimright@^2.1.0:
|
||||
version "2.1.0"
|
||||
resolved "https://registry.yarnpkg.com/string.prototype.trimright/-/string.prototype.trimright-2.1.0.tgz#669d164be9df9b6f7559fa8e89945b168a5a6c58"
|
||||
integrity sha512-fXZTSV55dNBwv16uw+hh5jkghxSnc5oHq+5K/gXgizHwAvMetdAJlHqqoFC1FSDVPYWLkAKl2cxpUT41sV7nSg==
|
||||
@ -6786,14 +6676,6 @@ strip-bom-string@^1.0.0:
|
||||
resolved "https://registry.yarnpkg.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92"
|
||||
integrity sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=
|
||||
|
||||
strip-comments@^1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/strip-comments/-/strip-comments-1.0.2.tgz#82b9c45e7f05873bee53f37168af930aa368679d"
|
||||
integrity sha512-kL97alc47hoyIQSV165tTt9rG5dn4w1dNnBhOQ3bOU1Nc1hel09jnXANaHJ7vzHLd4Ju8kseDGzlev96pghLFw==
|
||||
dependencies:
|
||||
babel-extract-comments "^1.0.0"
|
||||
babel-plugin-transform-object-rest-spread "^6.26.0"
|
||||
|
||||
strip-eof@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf"
|
||||
@ -6841,13 +6723,6 @@ supports-color@^2.0.0:
|
||||
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
|
||||
integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=
|
||||
|
||||
supports-color@^3.2.3:
|
||||
version "3.2.3"
|
||||
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6"
|
||||
integrity sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=
|
||||
dependencies:
|
||||
has-flag "^1.0.0"
|
||||
|
||||
supports-color@^5.3.0, supports-color@^5.4.0:
|
||||
version "5.5.0"
|
||||
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
|
||||
@ -6868,16 +6743,16 @@ svg-tags@^1.0.0:
|
||||
integrity sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q=
|
||||
|
||||
svgo@^1.0.0:
|
||||
version "1.3.0"
|
||||
resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.3.0.tgz#bae51ba95ded9a33a36b7c46ce9c359ae9154313"
|
||||
integrity sha512-MLfUA6O+qauLDbym+mMZgtXCGRfIxyQoeH6IKVcFslyODEe/ElJNwr0FohQ3xG4C6HK6bk3KYPPXwHVJk3V5NQ==
|
||||
version "1.3.2"
|
||||
resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.3.2.tgz#b6dc511c063346c9e415b81e43401145b96d4167"
|
||||
integrity sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==
|
||||
dependencies:
|
||||
chalk "^2.4.1"
|
||||
coa "^2.0.2"
|
||||
css-select "^2.0.0"
|
||||
css-select-base-adapter "^0.1.1"
|
||||
css-tree "1.0.0-alpha.33"
|
||||
csso "^3.5.1"
|
||||
css-tree "1.0.0-alpha.37"
|
||||
csso "^4.0.2"
|
||||
js-yaml "^3.13.1"
|
||||
mkdirp "~0.5.1"
|
||||
object.values "^1.1.0"
|
||||
@ -6886,31 +6761,10 @@ svgo@^1.0.0:
|
||||
unquote "~1.1.1"
|
||||
util.promisify "~1.0.0"
|
||||
|
||||
tailwindcss@^0.7.4:
|
||||
version "0.7.4"
|
||||
resolved "https://registry.yarnpkg.com/tailwindcss/-/tailwindcss-0.7.4.tgz#fb7926821d42eacdc12e6621a49d21f37a3ff9e9"
|
||||
integrity sha512-+GeQjHRJ2VmeLkrNwMCbPDfm2cc5P8eoc7n+DtZfI8oQdlo5eSHqsIlPEuZOtoqQlIALsd2jAggWrUUBFGP2ow==
|
||||
dependencies:
|
||||
autoprefixer "^9.4.5"
|
||||
bytes "^3.0.0"
|
||||
chalk "^2.4.1"
|
||||
css.escape "^1.5.1"
|
||||
fs-extra "^4.0.2"
|
||||
lodash "^4.17.5"
|
||||
node-emoji "^1.8.1"
|
||||
perfectionist "^2.4.0"
|
||||
postcss "^7.0.11"
|
||||
postcss-functions "^3.0.0"
|
||||
postcss-js "^2.0.0"
|
||||
postcss-nested "^4.1.1"
|
||||
postcss-selector-parser "^5.0.0"
|
||||
pretty-hrtime "^1.0.3"
|
||||
strip-comments "^1.0.2"
|
||||
|
||||
tailwindcss@^1.0.6:
|
||||
version "1.1.2"
|
||||
resolved "https://registry.yarnpkg.com/tailwindcss/-/tailwindcss-1.1.2.tgz#0107dc092c3edee6132b105d896b109c0f66afd6"
|
||||
integrity sha512-mcTzZHXMipnQY9haB17baNJmBTkYYcC8ljfMdB9/97FfhKJIzlglJcyGythuQTOu7r/QIbLfZYYWZhAvaGj95A==
|
||||
tailwindcss@^1.0.6, tailwindcss@^1.1.2:
|
||||
version "1.1.3"
|
||||
resolved "https://registry.yarnpkg.com/tailwindcss/-/tailwindcss-1.1.3.tgz#ad154f78e1e44060e32e3ed44b27287c2be126a6"
|
||||
integrity sha512-8sa/QO+blnu3WXUylsgvYZlUbBpVH36QeGuZxgSGqp1dF3g4AGe1azt8FsO8i8Hfe9Oyvwhx3iSjRDak3nngeQ==
|
||||
dependencies:
|
||||
autoprefixer "^9.4.5"
|
||||
bytes "^3.0.0"
|
||||
@ -6961,9 +6815,9 @@ terser-webpack-plugin@^1.4.1:
|
||||
worker-farm "^1.7.0"
|
||||
|
||||
terser@^4.1.2:
|
||||
version "4.3.4"
|
||||
resolved "https://registry.yarnpkg.com/terser/-/terser-4.3.4.tgz#ad91bade95619e3434685d69efa621a5af5f877d"
|
||||
integrity sha512-Kcrn3RiW8NtHBP0ssOAzwa2MsIRQ8lJWiBG/K7JgqPlomA3mtb2DEmp4/hrUA+Jujx+WZ02zqd7GYD+QRBB/2Q==
|
||||
version "4.3.9"
|
||||
resolved "https://registry.yarnpkg.com/terser/-/terser-4.3.9.tgz#e4be37f80553d02645668727777687dad26bbca8"
|
||||
integrity sha512-NFGMpHjlzmyOtPL+fDw3G7+6Ueh/sz4mkaUYa4lJCxOPTNzd0Uj0aZJOmsDYoSQyfuVoWDMSWTPU3huyOm2zdA==
|
||||
dependencies:
|
||||
commander "^2.20.0"
|
||||
source-map "~0.6.1"
|
||||
@ -6988,9 +6842,9 @@ through@~2.3.4:
|
||||
integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=
|
||||
|
||||
thunky@^1.0.2:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.0.3.tgz#f5df732453407b09191dae73e2a8cc73f381a826"
|
||||
integrity sha512-YwT8pjmNcAXBZqrubu22P4FYsh2D4dxRmnWBOL8Jk8bUcRUtc5326kx32tuTmFDAZtLOGEVNl8POAR8j896Iow==
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d"
|
||||
integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==
|
||||
|
||||
timers-browserify@^2.0.4:
|
||||
version "2.0.11"
|
||||
@ -7328,7 +7182,7 @@ vue-hot-reload-api@^2.3.0:
|
||||
resolved "https://registry.yarnpkg.com/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz#532955cc1eb208a3d990b3a9f9a70574657e08f2"
|
||||
integrity sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==
|
||||
|
||||
vue-loader@^15.2.4:
|
||||
vue-loader@^15.7.1:
|
||||
version "15.7.1"
|
||||
resolved "https://registry.yarnpkg.com/vue-loader/-/vue-loader-15.7.1.tgz#6ccacd4122aa80f69baaac08ff295a62e3aefcfd"
|
||||
integrity sha512-fwIKtA23Pl/rqfYP5TSGK7gkEuLhoTvRYW+TU7ER3q9GpNLt/PjG5NLv3XHRDiTg7OPM1JcckBgds+VnAc+HbA==
|
||||
@ -7339,12 +7193,12 @@ vue-loader@^15.2.4:
|
||||
vue-hot-reload-api "^2.3.0"
|
||||
vue-style-loader "^4.1.0"
|
||||
|
||||
vue-router@^3.0.2:
|
||||
vue-router@^3.1.3:
|
||||
version "3.1.3"
|
||||
resolved "https://registry.yarnpkg.com/vue-router/-/vue-router-3.1.3.tgz#e6b14fabc0c0ee9fda0e2cbbda74b350e28e412b"
|
||||
integrity sha512-8iSa4mGNXBjyuSZFCCO4fiKfvzqk+mhL0lnKuGcQtO1eoj8nq3CmbEG8FwK5QqoqwDgsjsf1GDuisDX4cdb/aQ==
|
||||
|
||||
vue-server-renderer@^2.5.16:
|
||||
vue-server-renderer@^2.6.10:
|
||||
version "2.6.10"
|
||||
resolved "https://registry.yarnpkg.com/vue-server-renderer/-/vue-server-renderer-2.6.10.tgz#cb2558842ead360ae2ec1f3719b75564a805b375"
|
||||
integrity sha512-UYoCEutBpKzL2fKCwx8zlRtRtwxbPZXKTqbl2iIF4yRZUNO/ovrHyDAJDljft0kd+K0tZhN53XRHkgvCZoIhug==
|
||||
@ -7366,7 +7220,7 @@ vue-style-loader@^4.1.0:
|
||||
hash-sum "^1.0.2"
|
||||
loader-utils "^1.0.2"
|
||||
|
||||
vue-template-compiler@^2.5.16:
|
||||
vue-template-compiler@^2.6.10:
|
||||
version "2.6.10"
|
||||
resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.6.10.tgz#323b4f3495f04faa3503337a82f5d6507799c9cc"
|
||||
integrity sha512-jVZkw4/I/HT5ZMvRnhv78okGusqe0+qH2A0Em0Cp8aq78+NK9TII263CDVz2QXZsIT+yyV/gZc/j/vlwa+Epyg==
|
||||
@ -7379,7 +7233,7 @@ vue-template-es2015-compiler@^1.9.0:
|
||||
resolved "https://registry.yarnpkg.com/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz#1ee3bc9a16ecbf5118be334bb15f9c46f82f5825"
|
||||
integrity sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==
|
||||
|
||||
vue@^2.5.16:
|
||||
vue@^2.6.10:
|
||||
version "2.6.10"
|
||||
resolved "https://registry.yarnpkg.com/vue/-/vue-2.6.10.tgz#a72b1a42a4d82a721ea438d1b6bf55e66195c637"
|
||||
integrity sha512-ImThpeNU9HbdZL3utgMCq0oiMzAkt1mcgy3/E6zWC/G6AaQoeuFdsl9nDhTDU3X1R6FK7nsIUuRACVcjI+A2GQ==
|
||||
@ -7397,20 +7251,27 @@ vuepress-html-webpack-plugin@^3.2.0:
|
||||
toposort "^1.0.0"
|
||||
util.promisify "1.0.0"
|
||||
|
||||
vuepress-plugin-container@^2.0.0:
|
||||
version "2.0.2"
|
||||
resolved "https://registry.yarnpkg.com/vuepress-plugin-container/-/vuepress-plugin-container-2.0.2.tgz#3489cc732c7a210b31f202556e1346125dffeb73"
|
||||
integrity sha512-SrGYYT7lkie7xlIlAVhn+9sDW42MytNCoxWL/2uDr+q9wZA4h1uYlQvfc2DVjy+FsM9PPPSslkeo/zCpYVY82g==
|
||||
vuepress-plugin-container@^2.0.2:
|
||||
version "2.1.0"
|
||||
resolved "https://registry.yarnpkg.com/vuepress-plugin-container/-/vuepress-plugin-container-2.1.0.tgz#eb2ba3e01cdac419bd678d40e05c934caffe6db0"
|
||||
integrity sha512-i4p7S1cqYUrg/3pt+xSghZtKSHVI3VXMQNept8ILxA+lMK1XJkdRkjNovZzwpXlrErQssvrUOTWBV0hdBv7eXQ==
|
||||
dependencies:
|
||||
markdown-it-container "^2.0.0"
|
||||
|
||||
vuepress@^1.0.0:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/vuepress/-/vuepress-1.1.0.tgz#ca0d787d93188b2fd05820a650d7e3643c9e7675"
|
||||
integrity sha512-LAgS9nXsmvjTuCc/LHPWnIsPOuVuZtxh1MjVZf/xJ3Yy5kXoPhqbGUptlQdQt3izjIlns9zin5K6MNBY3u5l5g==
|
||||
vuepress-plugin-smooth-scroll@^0.0.3:
|
||||
version "0.0.3"
|
||||
resolved "https://registry.yarnpkg.com/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz#6eff2d4c186cca917cc9f7df2b0af7de7c8c6438"
|
||||
integrity sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==
|
||||
dependencies:
|
||||
"@vuepress/core" "^1.1.0"
|
||||
"@vuepress/theme-default" "^1.1.0"
|
||||
smoothscroll-polyfill "^0.4.3"
|
||||
|
||||
vuepress@^1.0.0:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/vuepress/-/vuepress-1.2.0.tgz#2f2cdf337ad40a3e4866dfd33e97b840db386af7"
|
||||
integrity sha512-EfHo8Cc73qo+1Pm18hM0qOGynmDr8q5fu2664obynsdCJ1zpvoShVnA0Msraw4SI2xDc0iAoIb3dTwxUIM8DAw==
|
||||
dependencies:
|
||||
"@vuepress/core" "^1.2.0"
|
||||
"@vuepress/theme-default" "^1.2.0"
|
||||
cac "^6.3.9"
|
||||
envinfo "^7.2.0"
|
||||
opencollective-postinstall "^2.0.2"
|
||||
@ -7449,7 +7310,7 @@ webpack-dev-middleware@3.6.0:
|
||||
range-parser "^1.0.3"
|
||||
webpack-log "^2.0.0"
|
||||
|
||||
webpack-dev-middleware@^3.7.1:
|
||||
webpack-dev-middleware@^3.7.2:
|
||||
version "3.7.2"
|
||||
resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.7.2.tgz#0019c3db716e3fa5cecbf64f2ab88a74bab331f3"
|
||||
integrity sha512-1xC42LxbYoqLNAhV6YzTYacicgMZQTqRd27Sim9wn5hJrX3I5nxYy1SxSd4+gjUFsz1dQFj+yEe6zEVmSkeJjw==
|
||||
@ -7461,9 +7322,9 @@ webpack-dev-middleware@^3.7.1:
|
||||
webpack-log "^2.0.0"
|
||||
|
||||
webpack-dev-server@^3.5.1:
|
||||
version "3.8.1"
|
||||
resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.8.1.tgz#485b64c4aadc23f601e72114b40c1b1fea31d9f1"
|
||||
integrity sha512-9F5DnfFA9bsrhpUCAfQic/AXBVHvq+3gQS+x6Zj0yc1fVVE0erKh2MV4IV12TBewuTrYeeTIRwCH9qLMvdNvTw==
|
||||
version "3.9.0"
|
||||
resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.9.0.tgz#27c3b5d0f6b6677c4304465ac817623c8b27b89c"
|
||||
integrity sha512-E6uQ4kRrTX9URN9s/lIbqTAztwEPdvzVrcmHE8EQ9YnuT9J8Es5Wrd8n9BKg1a0oZ5EgEke/EQFgUsp18dSTBw==
|
||||
dependencies:
|
||||
ansi-html "0.0.7"
|
||||
bonjour "^3.5.0"
|
||||
@ -7474,18 +7335,18 @@ webpack-dev-server@^3.5.1:
|
||||
del "^4.1.1"
|
||||
express "^4.17.1"
|
||||
html-entities "^1.2.1"
|
||||
http-proxy-middleware "^0.19.1"
|
||||
http-proxy-middleware "0.19.1"
|
||||
import-local "^2.0.0"
|
||||
internal-ip "^4.3.0"
|
||||
ip "^1.1.5"
|
||||
is-absolute-url "^3.0.2"
|
||||
is-absolute-url "^3.0.3"
|
||||
killable "^1.0.1"
|
||||
loglevel "^1.6.4"
|
||||
opn "^5.5.0"
|
||||
p-retry "^3.0.1"
|
||||
portfinder "^1.0.24"
|
||||
portfinder "^1.0.25"
|
||||
schema-utils "^1.0.0"
|
||||
selfsigned "^1.10.6"
|
||||
selfsigned "^1.10.7"
|
||||
semver "^6.3.0"
|
||||
serve-index "^1.9.1"
|
||||
sockjs "0.3.19"
|
||||
@ -7494,7 +7355,7 @@ webpack-dev-server@^3.5.1:
|
||||
strip-ansi "^3.0.1"
|
||||
supports-color "^6.1.0"
|
||||
url "^0.11.0"
|
||||
webpack-dev-middleware "^3.7.1"
|
||||
webpack-dev-middleware "^3.7.2"
|
||||
webpack-log "^2.0.0"
|
||||
ws "^6.2.1"
|
||||
yargs "12.0.5"
|
||||
@ -7523,9 +7384,9 @@ webpack-sources@^1.1.0, webpack-sources@^1.4.0, webpack-sources@^1.4.1:
|
||||
source-map "~0.6.1"
|
||||
|
||||
webpack@^4.8.1:
|
||||
version "4.41.0"
|
||||
resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.41.0.tgz#db6a254bde671769f7c14e90a1a55e73602fc70b"
|
||||
integrity sha512-yNV98U4r7wX1VJAj5kyMsu36T8RPPQntcb5fJLOsMz/pt/WrKC0Vp1bAlqPLkA1LegSwQwf6P+kAbyhRKVQ72g==
|
||||
version "4.41.2"
|
||||
resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.41.2.tgz#c34ec76daa3a8468c9b61a50336d8e3303dce74e"
|
||||
integrity sha512-Zhw69edTGfbz9/8JJoyRQ/pq8FYUoY0diOXqW0T6yhgdhCv6wr0hra5DwwWexNRns2Z2+gsnrNcbe9hbGBgk/A==
|
||||
dependencies:
|
||||
"@webassemblyjs/ast" "1.8.5"
|
||||
"@webassemblyjs/helper-module-context" "1.8.5"
|
||||
@ -7632,11 +7493,6 @@ wrappy@1:
|
||||
resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
|
||||
integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=
|
||||
|
||||
write-file-stdout@0.0.2:
|
||||
version "0.0.2"
|
||||
resolved "https://registry.yarnpkg.com/write-file-stdout/-/write-file-stdout-0.0.2.tgz#c252d7c7c5b1b402897630e3453c7bfe690d9ca1"
|
||||
integrity sha1-wlLXx8WxtAKJdjDjRTx7/mkNnKE=
|
||||
|
||||
ws@^6.2.1:
|
||||
version "6.2.1"
|
||||
resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb"
|
||||
@ -7660,9 +7516,9 @@ yallist@^2.1.2:
|
||||
integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=
|
||||
|
||||
yallist@^3.0.0, yallist@^3.0.2, yallist@^3.0.3:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.0.tgz#906cc2100972dc2625ae78f566a2577230a1d6f7"
|
||||
integrity sha512-6gpP93MR+VOOehKbCPchro3wFZNSNmek8A2kbkOAZLIZAYx1KP/zAqwO0sOHi3xJEb+UBz8NaYt/17UNit1Q9w==
|
||||
version "3.1.1"
|
||||
resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd"
|
||||
integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==
|
||||
|
||||
yargs-parser@^11.1.1:
|
||||
version "11.1.1"
|
||||
@ -7672,10 +7528,10 @@ yargs-parser@^11.1.1:
|
||||
camelcase "^5.0.0"
|
||||
decamelize "^1.2.0"
|
||||
|
||||
yargs-parser@^13.1.1:
|
||||
version "13.1.1"
|
||||
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.1.tgz#d26058532aa06d365fe091f6a1fc06b2f7e5eca0"
|
||||
integrity sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ==
|
||||
yargs-parser@^15.0.0:
|
||||
version "15.0.0"
|
||||
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-15.0.0.tgz#cdd7a97490ec836195f59f3f4dbe5ea9e8f75f08"
|
||||
integrity sha512-xLTUnCMc4JhxrPEPUYD5IBR1mWCK/aT6+RJ/K29JY2y1vD+FhtgKK0AXRWvI262q3QSffAQuTouFIKUuHX89wQ==
|
||||
dependencies:
|
||||
camelcase "^5.0.0"
|
||||
decamelize "^1.2.0"
|
||||
@ -7699,9 +7555,9 @@ yargs@12.0.5:
|
||||
yargs-parser "^11.1.1"
|
||||
|
||||
yargs@^14.0.0:
|
||||
version "14.0.0"
|
||||
resolved "https://registry.yarnpkg.com/yargs/-/yargs-14.0.0.tgz#ba4cacc802b3c0b3e36a9e791723763d57a85066"
|
||||
integrity sha512-ssa5JuRjMeZEUjg7bEL99AwpitxU/zWGAGpdj0di41pOEmJti8NR6kyUIJBkR78DTYNPZOU08luUo0GTHuB+ow==
|
||||
version "14.2.0"
|
||||
resolved "https://registry.yarnpkg.com/yargs/-/yargs-14.2.0.tgz#f116a9242c4ed8668790b40759b4906c276e76c3"
|
||||
integrity sha512-/is78VKbKs70bVZH7w4YaZea6xcJWOAwkhbR0CFuZBmYtfTYF0xjGJF43AYd8g2Uii1yJwmS5GR2vBmrc32sbg==
|
||||
dependencies:
|
||||
cliui "^5.0.0"
|
||||
decamelize "^1.2.0"
|
||||
@ -7713,7 +7569,7 @@ yargs@^14.0.0:
|
||||
string-width "^3.0.0"
|
||||
which-module "^2.0.0"
|
||||
y18n "^4.0.0"
|
||||
yargs-parser "^13.1.1"
|
||||
yargs-parser "^15.0.0"
|
||||
|
||||
zepto@^1.2.0:
|
||||
version "1.2.0"
|
||||
|
@@ -0,0 +1,273 @@
GIT
  remote: https://github.com/stympy/faker.git
  revision: 4e9144825fcc9ba5c83cc0fd037779ab82f3120b
  branch: master
  specs:
    faker (2.6.0)
      i18n (>= 1.6, < 1.8)

GEM
  remote: https://rubygems.org/
  specs:
    actioncable (6.0.0)
      actionpack (= 6.0.0)
      nio4r (~> 2.0)
      websocket-driver (>= 0.6.1)
    actionmailbox (6.0.0)
      actionpack (= 6.0.0)
      activejob (= 6.0.0)
      activerecord (= 6.0.0)
      activestorage (= 6.0.0)
      activesupport (= 6.0.0)
      mail (>= 2.7.1)
    actionmailer (6.0.0)
      actionpack (= 6.0.0)
      actionview (= 6.0.0)
      activejob (= 6.0.0)
      mail (~> 2.5, >= 2.5.4)
      rails-dom-testing (~> 2.0)
    actionpack (6.0.0)
      actionview (= 6.0.0)
      activesupport (= 6.0.0)
      rack (~> 2.0)
      rack-test (>= 0.6.3)
      rails-dom-testing (~> 2.0)
      rails-html-sanitizer (~> 1.0, >= 1.2.0)
    actiontext (6.0.0)
      actionpack (= 6.0.0)
      activerecord (= 6.0.0)
      activestorage (= 6.0.0)
      activesupport (= 6.0.0)
      nokogiri (>= 1.8.5)
    actionview (6.0.0)
      activesupport (= 6.0.0)
      builder (~> 3.1)
      erubi (~> 1.4)
      rails-dom-testing (~> 2.0)
      rails-html-sanitizer (~> 1.1, >= 1.2.0)
    activejob (6.0.0)
      activesupport (= 6.0.0)
      globalid (>= 0.3.6)
    activemodel (6.0.0)
      activesupport (= 6.0.0)
    activerecord (6.0.0)
      activemodel (= 6.0.0)
      activesupport (= 6.0.0)
    activestorage (6.0.0)
      actionpack (= 6.0.0)
      activejob (= 6.0.0)
      activerecord (= 6.0.0)
      marcel (~> 0.3.1)
    activesupport (6.0.0)
      concurrent-ruby (~> 1.0, >= 1.0.2)
      i18n (>= 0.7, < 2)
      minitest (~> 5.1)
      tzinfo (~> 1.1)
      zeitwerk (~> 2.1, >= 2.1.8)
    addressable (2.7.0)
      public_suffix (>= 2.0.2, < 5.0)
    archive-zip (0.12.0)
      io-like (~> 0.3.0)
    bcrypt (3.1.13)
    bindex (0.8.1)
    bootsnap (1.4.5)
      msgpack (~> 1.0)
    builder (3.2.3)
    byebug (11.0.1)
    capybara (3.29.0)
      addressable
      mini_mime (>= 0.1.3)
      nokogiri (~> 1.8)
      rack (>= 1.6.0)
      rack-test (>= 0.6.3)
      regexp_parser (~> 1.5)
      xpath (~> 3.2)
    childprocess (3.0.0)
    chromedriver-helper (2.1.1)
      archive-zip (~> 0.10)
      nokogiri (~> 1.8)
    coffee-rails (4.2.2)
      coffee-script (>= 2.2.0)
      railties (>= 4.0.0)
    coffee-script (2.4.1)
      coffee-script-source
      execjs
    coffee-script-source (1.12.2)
    concurrent-ruby (1.1.5)
    crass (1.0.4)
    devise (4.7.1)
      bcrypt (~> 3.0)
      orm_adapter (~> 0.1)
      railties (>= 4.1.0)
      responders
      warden (~> 1.2.3)
    erubi (1.9.0)
    execjs (2.7.0)
    ffi (1.11.1)
    globalid (0.4.2)
      activesupport (>= 4.2.0)
    i18n (1.7.0)
      concurrent-ruby (~> 1.0)
    io-like (0.3.0)
    jbuilder (2.9.1)
      activesupport (>= 4.2.0)
    listen (3.1.5)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)
      ruby_dep (~> 1.2)
    loofah (2.3.0)
      crass (~> 1.0.2)
      nokogiri (>= 1.5.9)
    mail (2.7.1)
      mini_mime (>= 0.1.1)
    marcel (0.3.3)
      mimemagic (~> 0.3.2)
    method_source (0.9.2)
    mimemagic (0.3.3)
    mini_mime (1.0.2)
    mini_portile2 (2.4.0)
    minitest (5.12.2)
    msgpack (1.3.1)
    nio4r (2.5.2)
    nokogiri (1.10.4)
      mini_portile2 (~> 2.4.0)
    orm_adapter (0.5.0)
    pg (1.1.4)
    public_suffix (4.0.1)
    puma (3.12.1)
    rack (2.0.7)
    rack-test (1.1.0)
      rack (>= 1.0, < 3)
    rails (6.0.0)
      actioncable (= 6.0.0)
      actionmailbox (= 6.0.0)
      actionmailer (= 6.0.0)
      actionpack (= 6.0.0)
      actiontext (= 6.0.0)
      actionview (= 6.0.0)
      activejob (= 6.0.0)
      activemodel (= 6.0.0)
      activerecord (= 6.0.0)
      activestorage (= 6.0.0)
      activesupport (= 6.0.0)
      bundler (>= 1.3.0)
      railties (= 6.0.0)
      sprockets-rails (>= 2.0.0)
    rails-dom-testing (2.0.3)
      activesupport (>= 4.2.0)
      nokogiri (>= 1.6)
    rails-html-sanitizer (1.3.0)
      loofah (~> 2.3)
    railties (6.0.0)
      actionpack (= 6.0.0)
      activesupport (= 6.0.0)
      method_source
      rake (>= 0.8.7)
      thor (>= 0.20.3, < 2.0)
    rake (13.0.0)
    rb-fsevent (0.10.3)
    rb-inotify (0.10.0)
      ffi (~> 1.0)
    redis (4.1.3)
    redis-actionpack (5.1.0)
      actionpack (>= 4.0, < 7)
      redis-rack (>= 1, < 3)
      redis-store (>= 1.1.0, < 2)
    redis-activesupport (5.2.0)
      activesupport (>= 3, < 7)
      redis-store (>= 1.3, < 2)
    redis-rack (2.0.6)
      rack (>= 1.5, < 3)
      redis-store (>= 1.2, < 2)
    redis-rails (5.0.2)
      redis-actionpack (>= 5.0, < 6)
      redis-activesupport (>= 5.0, < 6)
      redis-store (>= 1.2, < 2)
    redis-store (1.8.0)
      redis (>= 4, < 5)
    regexp_parser (1.6.0)
    responders (3.0.0)
      actionpack (>= 5.0)
      railties (>= 5.0)
    ruby_dep (1.5.0)
    rubyzip (2.0.0)
    sass (3.7.4)
      sass-listen (~> 4.0.0)
    sass-listen (4.0.0)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)
    sass-rails (5.1.0)
      railties (>= 5.2.0)
      sass (~> 3.1)
      sprockets (>= 2.8, < 4.0)
      sprockets-rails (>= 2.0, < 4.0)
      tilt (>= 1.1, < 3)
    selenium-webdriver (3.142.6)
      childprocess (>= 0.5, < 4.0)
      rubyzip (>= 1.2.2)
    spring (2.1.0)
    spring-watcher-listen (2.0.1)
      listen (>= 2.7, < 4.0)
      spring (>= 1.2, < 3.0)
    sprockets (3.7.2)
      concurrent-ruby (~> 1.0)
      rack (> 1, < 3)
    sprockets-rails (3.2.1)
      actionpack (>= 4.0)
      activesupport (>= 4.0)
      sprockets (>= 3.0.0)
    thor (0.20.3)
    thread_safe (0.3.6)
    tilt (2.0.10)
    turbolinks (5.2.1)
      turbolinks-source (~> 5.2)
    turbolinks-source (5.2.0)
    tzinfo (1.2.5)
      thread_safe (~> 0.1)
    uglifier (4.2.0)
      execjs (>= 0.3.0, < 3)
    warden (1.2.8)
      rack (>= 2.0.6)
    web-console (4.0.1)
      actionview (>= 6.0.0)
      activemodel (>= 6.0.0)
      bindex (>= 0.4.0)
      railties (>= 6.0.0)
    websocket-driver (0.7.1)
      websocket-extensions (>= 0.1.0)
    websocket-extensions (0.1.4)
    xpath (3.2.0)
      nokogiri (~> 1.8)
    zeitwerk (2.2.0)

PLATFORMS
  ruby

DEPENDENCIES
  bootsnap (>= 1.1.0)
  byebug
  capybara (>= 2.15)
  chromedriver-helper
  coffee-rails (~> 4.2)
  devise
  faker!
  jbuilder (~> 2.5)
  listen (>= 3.0.5, < 3.2)
  pg (>= 0.18, < 2.0)
  puma (~> 3.11)
  rails (~> 6.0.0.rc1)
  redis-rails
  sass-rails (~> 5.0)
  selenium-webdriver
  spring
  spring-watcher-listen (~> 2.0.0)
  turbolinks (~> 5)
  tzinfo-data
  uglifier (>= 1.3.0)
  web-console (>= 3.3.0)

RUBY VERSION
   ruby 2.5.7p206

BUNDLED WITH
   1.17.3
@@ -19,7 +19,7 @@ default: &default
  encoding: unicode
  host: db
  username: postgres
  password:
  password: postgres
  pool: 5

development:
@@ -63,4 +63,6 @@ Rails.application.configure do

  config.web_console.whitelisted_ips = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']

  config.hosts << "rails_app"

end
14 go.mod
@@ -2,8 +2,7 @@ module github.com/dosco/super-graph

require (
	github.com/GeertJohan/go.rice v1.0.0
	github.com/Masterminds/semver v1.5.0
	github.com/OneOfOne/xxhash v1.2.5 // indirect
	github.com/NYTimes/gziphandler v1.1.1
	github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
	github.com/brianvoe/gofakeit v3.18.0+incompatible
@@ -16,27 +15,22 @@ require (
	github.com/garyburd/redigo v1.6.0
	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
	github.com/gobuffalo/flect v0.1.6
	github.com/gorilla/websocket v1.4.1
	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
	github.com/jackc/pgconn v1.0.1
	github.com/jackc/pgtype v1.0.1
	github.com/jackc/pgx v3.6.0+incompatible
	github.com/jackc/pgx/v4 v4.0.1
	github.com/jackc/tern v1.8.2
	github.com/magiconair/properties v1.8.1 // indirect
	github.com/pelletier/go-toml v1.4.0 // indirect
	github.com/pkg/errors v0.8.1
	github.com/rs/cors v1.7.0
	github.com/rs/zerolog v1.15.0
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.2.2 // indirect
	github.com/spf13/cobra v0.0.5
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/spf13/viper v1.4.0
	github.com/valyala/fasttemplate v1.0.1
	golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
	golang.org/x/sys v0.0.0-20190927073244-c990c680b611 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
	golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 // indirect
	gopkg.in/yaml.v2 v2.2.7 // indirect
)

go 1.13
49 go.sum
@@ -1,34 +1,28 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3 h1:+qz9Ga6l6lKw6fgvk5RMV5HQznSLvI8Zxajwdj4FhFg=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3/go.mod h1:FlkD11RtgMTYjVuBnb7cxoHmQGqvPpCsr2atC88nl/M=
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8=
github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.0 h1:Eb1IiuHmi3FhT12NKfqCQXSXRqc4NTMvgJoREemrSt4=
github.com/cespare/xxhash/v2 v2.0.0/go.mod h1:MaMeaVDXZNmTpkOyhVs3/WfjgobkbQgfrVnrr3DyZL0=
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -41,6 +35,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=
@@ -68,8 +63,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.1.1 h1:GTZJjJufv9FxgRs1+0Soo3wj+Md3kTUmTER/YE4uINA=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.1.6 h1:D7KWNRFiCknJKA495/e1BO7oxqf8tbieaLv/ehoZ/+g=
github.com/gobuffalo/flect v0.1.6/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
@@ -85,8 +78,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -98,8 +89,6 @@ github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZb
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
@@ -124,9 +113,6 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.0.1 h1:7GWB9n3DdnO3TIbj59wMAE9QcHPL4cy/Bbtk5P1Noow=
github.com/jackc/pgtype v1.0.1/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
github.com/jackc/pgx v0.0.0-20180217033919-55ca9db5d578/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
@@ -136,8 +122,7 @@ github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.0.0 h1:rbjAshlgKscNa7j0jAM0uNQflis5o2XUogPMVAwtcsM=
github.com/jackc/puddle v1.0.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/tern v1.8.2 h1:+d9eK83fRS0dbf6nt+2tjILYF4FKG1O5xTFB8Lzc66U=
github.com/jackc/tern v1.8.2/go.mod h1:AMppp2oyCT6rYnJHLLMmPWwahfFvdIVi6mr9gH81Nxs=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -161,23 +146,25 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pkg/errors v0.0.0-20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -193,10 +180,13 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
@@ -206,22 +196,16 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.0-20160114030619-9c9300901990/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20151218134703-7f60f83a2c81/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -245,8 +229,6 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
@@ -256,7 +238,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20151201002508-7b85b097bf75/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -301,8 +282,8 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611 h1:q9u40nxWT5zRClI/uU9dHCiYGottAg6Nzz4YUQyHxdA=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -333,4 +314,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
9 jsn/bench.0 Normal file
@@ -0,0 +1,9 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/jsn
BenchmarkGet-8        13310    88437 ns/op   3328 B/op   2 allocs/op
BenchmarkFilter-8    182232     6922 ns/op    448 B/op   1 allocs/op
BenchmarkStrip-8     162709     6560 ns/op    224 B/op   1 allocs/op
BenchmarkReplace-8    85846    13996 ns/op    416 B/op   1 allocs/op
PASS
ok    github.com/dosco/super-graph/jsn    5.913s
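Figures in this form come from Go's built-in benchmark runner (`go test -bench=. -benchmem`). As a point of reference, here is a minimal sketch of how a benchmark over this package's `Filter` might be written — the input document and key list are invented stand-ins, not the repository's actual fixtures:

```go
package jsn

import (
	"bytes"
	"testing"
)

// BenchmarkFilterSketch measures Filter the way the numbers above were
// produced: the body runs b.N times and per-op allocation stats are
// reported. The sample JSON and key list are made up for illustration.
func BenchmarkFilterSketch(b *testing.B) {
	input := []byte(`{"id":1,"full_name":"Ada Lovelace","email":"ada@example.com"}`)
	keys := []string{"id", "full_name"}

	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		var buf bytes.Buffer
		if err := Filter(&buf, input, keys); err != nil {
			b.Fatal(err)
		}
	}
}
```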
@@ -27,14 +27,27 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {

  var k []byte
  state := expectKey
  instr := false
  slash := 0

  for i := 0; i < len(b); i++ {
    if instr && b[i] == '\\' {
      slash++
      continue
    }

    if b[i] == '"' && (slash%2 == 0) {
      instr = !instr
    }

    if state == expectObjClose || state == expectListClose {
      switch b[i] {
      case '{', '[':
        d++
      case '}', ']':
        d--
      if !instr {
        switch b[i] {
        case '{', '[':
          d++
        case '}', ']':
          d--
        }
      }
    }

@@ -64,7 +77,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
      state = expectKeyClose
      s = i

    case state == expectKeyClose && b[i] == '"':
    case state == expectKeyClose && (b[i] == '"' && (slash%2 == 0)):
      state = expectColon
      k = b[(s + 1):i]

@@ -74,7 +87,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
    case state == expectValue && b[i] == '"':
      state = expectString

    case state == expectString && b[i] == '"':
    case state == expectString && (b[i] == '"' && (slash%2 == 0)):
      e = i

    case state == expectValue && b[i] == '[':
@@ -97,8 +110,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
    case state == expectNumClose &&
      ((b[i] < '0' || b[i] > '9') &&
        (b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
      i--
      e = i
      e = i - 1

    case state == expectValue &&
      (b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
@@ -110,7 +122,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
    case state == expectValue && b[i] == 'n':
      state = expectNull

    case state == expectNull && b[i] == 'l':
    case state == expectNull && (b[i-1] == 'l' && b[i] == 'l'):
      e = i
    }

@@ -140,7 +152,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
  }

  if sk > 0 && sk < len(cb) {
    _, err = w.Write(cb[sk:len(cb)])
    _, err = w.Write(cb[sk:])
  } else {
    _, err = w.Write(cb)
  }
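The recurring change across Filter, Get, Keys, Replace, and Strip is the same idea: track the parity of consecutive backslashes so a `"` only toggles the in-string flag (or closes a key/value) when it is unescaped. A minimal standalone sketch of that rule, outside the package's state machine (the function name is mine, not part of the diff):

```go
package main

import "fmt"

// unescapedQuotes reports the byte offsets of every '"' in b that is not
// preceded by an odd run of backslashes -- the same slash-parity test the
// patch adds to the jsn scanners.
func unescapedQuotes(b []byte) []int {
	var out []int
	slash := 0
	for i := 0; i < len(b); i++ {
		if b[i] == '\\' {
			slash++
			continue
		}
		if b[i] == '"' && slash%2 == 0 {
			out = append(out, i)
		}
		slash = 0 // any non-backslash byte breaks the run
	}
	return out
}

func main() {
	// The quotes around `\"hi\"` are escaped and are not reported.
	fmt.Println(unescapedQuotes([]byte(`{"a": "he said \"hi\""}`)))
}
```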
36
jsn/fuzz.go
@@ -1,35 +1,11 @@
// +build gofuzz

package jsn

import "bytes"

// FuzzerEntrypoint for Fuzzbuzz
func FuzzerEntryPoint(data []byte) int {
  err1 := Validate(string(data))

  var b1 bytes.Buffer
  err2 := Filter(&b1, data, []string{"id", "full_name", "embed"})

  path1 := [][]byte{[]byte("data"), []byte("users")}
  Strip(data, path1)

  from := []Field{
    {[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
    {[]byte("__twitter_id"), []byte(`"ABC123"`)},
func Fuzz(data []byte) int {
  if err := unifiedTest(data); err != nil {
    return 0
  }

  to := []Field{
    {[]byte("__twitter_id"), []byte(`"1234567890"`)},
    {[]byte("some_list"), []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)},
  }

  var b2 bytes.Buffer
  err3 := Replace(&b2, data, from, to)

  Keys(data)

  if err1 != nil || err2 != nil || err3 != nil {
    return -1
  }

  return 0
  return 1
}
60
jsn/fuzz_test.go
Normal file
@@ -0,0 +1,60 @@
package jsn

import (
  "testing"
)

func TestFuzzCrashers(t *testing.T) {
  var crashers = []string{
    "00\"0000\"0{",
    "6\",\n\t\t\t\"something\": " +
      "null\n\t\t},\n\t\t{\n\t\t\t\"id" +
      "\": 12,\n\t\t\t\"full_name" +
      "\": \"Brenton Bauch Ph" +
      "D\",\n\t\t\t\"email\": \"ren" +
      "ee@miller.co\",\n\t\t\t\"_" +
      "_twitter_id\": 1\n\t\t}," +
      "\n\t\t{\n\t\t\t\"id\": 13,\n\t\t" +
      "\t\"full_name\": \"Daine" +
      " Gleichner\",\n\t\t\t\"ema" +
      "il\": \"andrea@gmail.c" +
      "om\",\n\t\t\t\"__twitter_i" +
      "d\": \"\",\n\t\t\t\"id__twit" +
      "ter_id\": \"NOOO\",\n\t\t\t" +
      "\"work_email\": \"andre" +
      "a@nienow.co\"\n\t\t}\n\t]}" +
      "\n\t}",
    "0000\"0000\"0{",
    "0000\"\"{",
    "0000\"000\"{",
    "0\"\"{",
    "\"0\"{",
    "000\"0\"{",
    "0\"0000\"0{",
    "000\"\"{",
    "0\"00\"{",
    "000\"0000\"0{",
    "000\"00\"{",
    "\"\"{",
    "0\"0000\"{",
    "\"000\"00{",
    "0000\"00\"{",
    "00\"0\"{",
    "0\"0\"{",
    "000\"0000\"{",
    "00\"0000\"{",
    "0000\"0000\"{",
    "\"000\"{",
    "00\"00\"{",
    "00\"0000\"00{",
    "0\"0000\"00{",
    "00\"\"{",
    "0000\"0\"{",
    "000\"000\"{",
    "\"00000000\"{",
  }

  for _, f := range crashers {
    _ = unifiedTest([]byte(f))
  }
}
50
jsn/get.go
@@ -51,13 +51,27 @@ func Get(b []byte, keys [][]byte) []Field {
  state := expectKey

  n := 0
  instr := false
  slash := 0

  for i := 0; i < len(b); i++ {
    if instr && b[i] == '\\' {
      slash++
      continue
    }

    if b[i] == '"' && (slash%2 == 0) {
      instr = !instr
    }

    if state == expectObjClose || state == expectListClose {
      switch b[i] {
      case '{', '[':
        d++
      case '}', ']':
        d--
      if !instr {
        switch b[i] {
        case '{', '[':
          d++
        case '}', ']':
          d--
        }
      }
    }

@@ -66,7 +80,7 @@ func Get(b []byte, keys [][]byte) []Field {
      state = expectKeyClose
      s = i

    case state == expectKeyClose && b[i] == '"':
    case state == expectKeyClose && (b[i] == '"' && (slash%2 == 0)):
      state = expectColon
      k = b[(s + 1):i]

@@ -77,7 +91,7 @@ func Get(b []byte, keys [][]byte) []Field {
      state = expectString
      s = i

    case state == expectString && b[i] == '"':
    case state == expectString && (b[i] == '"' && (slash%2 == 0)):
      e = i

    case state == expectValue && b[i] == '[':
@@ -105,8 +119,7 @@ func Get(b []byte, keys [][]byte) []Field {
    case state == expectNumClose &&
      ((b[i] < '0' || b[i] > '9') &&
        (b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
      i--
      e = i
      e = i - 1

    case state == expectValue &&
      (b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
@@ -118,8 +131,9 @@ func Get(b []byte, keys [][]byte) []Field {

    case state == expectValue && b[i] == 'n':
      state = expectNull
      s = i

    case state == expectNull && b[i] == 'l':
    case state == expectNull && (b[i-1] == 'l' && b[i] == 'l'):
      e = i
    }

@@ -131,9 +145,25 @@ func Get(b []byte, keys [][]byte) []Field {
      n++
    }

    if state == expectListClose {
    loop:
      for j := i + 1; j < len(b); j++ {
        switch b[j] {
        case ' ', '\t', '\n':
          continue
        case '{':
          break loop
        }
        i = e
        break loop
      }
    }

    state = expectKey
    e = 0
  }

  slash = 0
}

return res[:n]
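Two behavioral fixes stand out in Get beyond the escape handling: numbers now end with `e = i - 1` instead of decrementing `i` (so the scanner no longer re-reads the terminating byte), and after a closing `]` the scanner peeks ahead, continuing only when the next non-whitespace byte opens another object. A condensed sketch of that lookahead, lifted out of the state machine (the function name is mine):

```go
// skipListTail peeks past whitespace after a ']' at position i.
// It returns the index scanning should resume from: i unchanged when
// another object follows (more list elements to visit), or e (the
// recorded end of the value) when the list is really finished.
func skipListTail(b []byte, i, e int) int {
	for j := i + 1; j < len(b); j++ {
		switch b[j] {
		case ' ', '\t', '\n':
			continue // skip whitespace between list elements
		case '{':
			return i // an object follows; keep scanning in place
		}
		return e // anything else: jump to the value end
	}
	return i
}
```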
144
jsn/json_test.go
@@ -2,6 +2,9 @@ package jsn

import (
  "bytes"
  "fmt"
  "io/ioutil"
  "strings"
  "testing"
)

@@ -9,16 +12,16 @@ var (
  input1 = `
  {
  "data": {
  "test": { "__twitter_id": "ABCD" },
  "test_1a": { "__twitter_id": "ABCD" },
  "users": [
  {
  "id": 1,
  "full_name": "Sidney Stroman",
  "full_name": "'Sidney St[1]roman'",
  "email": "user0@demo.com",
  "__twitter_id": "2048666903444506956",
  "embed": {
  "id": 8,
  "full_name": "Caroll Orn Sr.",
  "full_name": "Caroll Orn Sr's",
  "email": "joannarau@hegmann.io",
  "__twitter_id": "ABC123"
  "more": [{
@@ -37,7 +40,7 @@ var (
  "id": 3,
  "full_name": "Kenna Cassin",
  "email": "user2@demo.com",
  "__twitter_id": { "name": "hello", "address": { "work": "1 infinity loop" } }
  "__twitter_id": { "name": "\"hellos\"", "address": { "work": "1 infinity loop" } }
  },
  {
  "id": 4,
@@ -108,7 +111,7 @@ var (
  input2 = `
  [{
  "id": 1,
  "full_name": "Sidney Stroman",
  "full_name": "Sidney St[1]roman",
  "email": "user0@demo.com",
  "__twitter_id": "2048666903444506956",
  "something": null,
@@ -130,7 +133,7 @@ var (
  input3 = `
  {
  "data": {
  "test": { "__twitter_id": "ABCD" },
  "test_1a": { "__twitter_id": "ABCD" },
  "users": [{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]
  }
  }`
@@ -138,7 +141,7 @@ var (
  input4 = `
  { "users" : [{
  "id": 1,
  "full_name": "Sidney Stroman",
  "full_name": "Sidney St[1]roman",
  "email": "user0@demo.com",
  "__twitter_id": "2048666903444506956",
  "embed": {
@@ -155,24 +158,33 @@ var (
  "email": "user1@demo.com",
  "__twitter_id": [{ "name": "hello" }, { "name": "world"}]
  }] }`

  input5 = `
  {"data":{"title":"In September 2018, Slovak police stated that Kuciak was murdered because of his investigative work, and that the murder had been ordered.[9][10] They arrested eight suspects,[11] charging three of them with first-degree murder.[11]","topics":["cpp"]},"a":["1111"]},"thread_slug":"in-september-2018-slovak-police-stated-that-kuciak-7929",}`

  input6 = `
  {"users" : [{"id" : 1, "email" : "vicram@gmail.com", "slug" : "vikram-rangnekar", "threads" : [], "threads_cursor" : null}, {"id" : 3, "email" : "marareilly@lang.name", "slug" : "raymundo-corwin", "threads" : [{"id" : 9, "title" : "Et alias et aut porro praesentium nam in voluptatem reiciendis quisquam perspiciatis inventore eos quia et et enim qui amet."}, {"id" : 25, "title" : "Ipsam quam nemo culpa tempore amet optio sit sed eligendi autem consequatur quaerat rem velit quibusdam quibusdam optio a voluptatem."}], "threads_cursor" : 25}], "users_cursor" : 3}`

  input7, _ = ioutil.ReadFile("test7.json")

  input8, _ = ioutil.ReadFile("test8.json")
)

func TestGet(t *testing.T) {
  values := Get([]byte(input1), [][]byte{
    []byte("test_1a"),
    []byte("__twitter_id"),
    []byte("work_email"),
  })

  expected := []Field{
    {[]byte("test_1a"), []byte(`{ "__twitter_id": "ABCD" }`)},
    {[]byte("__twitter_id"), []byte(`"ABCD"`)},
    {[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
    {[]byte("__twitter_id"), []byte(`"ABC123"`)},
    {[]byte("__twitter_id"), []byte(`"more123"`)},
    {[]byte("__twitter_id"),
      []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
    {[]byte("__twitter_id"),
      []byte(`{ "name": "hello", "address": { "work": "1 infinity loop" } }`),
    },
    {[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
    {[]byte("__twitter_id"), []byte(`{ "name": "\"hellos\"", "address": { "work": "1 infinity loop" } }`)},
    {[]byte("__twitter_id"), []byte(`1234567890`)},
    {[]byte("__twitter_id"), []byte(`1.23E`)},
    {[]byte("__twitter_id"), []byte(`true`)},
@@ -191,16 +203,92 @@ func TestGet(t *testing.T) {
  }

  for i := range expected {
    if bytes.Equal(values[i].Key, expected[i].Key) == false {
    if !bytes.Equal(values[i].Key, expected[i].Key) {
      t.Error(string(values[i].Key), " != ", string(expected[i].Key))
    }

    if bytes.Equal(values[i].Value, expected[i].Value) == false {
    if !bytes.Equal(values[i].Value, expected[i].Value) {
      t.Error(string(values[i].Value), " != ", string(expected[i].Value))
    }
  }
}

func TestGet1(t *testing.T) {
  values := Get([]byte(input5), [][]byte{
    []byte("thread_slug"),
  })

  expected := []Field{
    {[]byte("thread_slug"), []byte(`"in-september-2018-slovak-police-stated-that-kuciak-7929"`)},
  }

  if len(values) != len(expected) {
    t.Fatal("len(values) != len(expected)")
  }

  for i := range expected {
    if !bytes.Equal(values[i].Key, expected[i].Key) {
      t.Error(string(values[i].Key), " != ", string(expected[i].Key))
    }

    if !bytes.Equal(values[i].Value, expected[i].Value) {
      t.Error(string(values[i].Value), " != ", string(expected[i].Value))
    }
  }
}

func TestGet2(t *testing.T) {
  values := Get([]byte(input6), [][]byte{
    []byte("users_cursor"), []byte("threads_cursor"),
  })

  expected := []Field{
    {[]byte("threads_cursor"), []byte(`null`)},
    {[]byte("threads_cursor"), []byte(`25`)},
    {[]byte("users_cursor"), []byte(`3`)},
  }

  if len(values) != len(expected) {
    t.Fatal("len(values) != len(expected)")
  }

  for i := range expected {
    if !bytes.Equal(values[i].Key, expected[i].Key) {
      t.Error(string(values[i].Key), " != ", string(expected[i].Key))
    }

    if !bytes.Equal(values[i].Value, expected[i].Value) {
      t.Error(string(values[i].Value), " != ", string(expected[i].Value))
    }
  }
}

func TestGet3(t *testing.T) {
  values := Get(input7, [][]byte{[]byte("data")})
  v := values[0].Value

  if !bytes.Equal(v[len(v)-11:], []byte(`Rangnekar"}`)) {
    t.Fatal("corrupt ending")
  }
}

func TestGet4(t *testing.T) {
  exp := `"# \n\n@@@java\npackage main\n\nimport (\n \"net/http\"\n \"strings\"\n\n \"github.com/gin-gonic/gin\"\n)\n\nfunc main() {\n r := gin.Default()\n r.LoadHTMLGlob(\"templates/*\")\n\n r.GET(\"/\", handleIndex)\n r.GET(\"/to/:name\", handleIndex)\n r.Run()\n}\n\n// Hello is page data for the template\ntype Hello struct {\n Name string\n}\n\nfunc handleIndex(c *gin.Context) {\n name := c.Param(\"name\")\n if name != \"\" {\n name = strings.TrimPrefix(c.Param(\"name\"), \"/\")\n }\n c.HTML(http.StatusOK, \"hellofly.tmpl\", gin.H{\"Name\": name})\n}\n@@@\n\n\\"`

  exp = strings.ReplaceAll(exp, "@", "`")

  values := Get(input8, [][]byte{[]byte("body")})

  if string(values[0].Key) != "body" {
    t.Fatal("unexpected key")
  }

  if string(values[0].Value) != exp {
    fmt.Println(string(values[0].Value))
    t.Fatal("unexpected value")
  }
}

func TestValue(t *testing.T) {
  v1 := []byte("12345")
  if !bytes.Equal(Value(v1), v1) {
@@ -225,9 +313,12 @@ func TestValue(t *testing.T) {

func TestFilter1(t *testing.T) {
  var b bytes.Buffer
  Filter(&b, []byte(input2), []string{"id", "full_name", "embed"})
  err := Filter(&b, []byte(input2), []string{"id", "full_name", "embed"})
  if err != nil {
    t.Error(err)
  }

  expected := `[{"id": 1,"full_name": "Sidney Stroman","embed": {"id": 8,"full_name": "Caroll Orn Sr.","email": "joannarau@hegmann.io","__twitter_id": "ABC123"}},{"id": 2,"full_name": "Jerry Dickinson"}]`
  expected := `[{"id": 1,"full_name": "Sidney St[1]roman","embed": {"id": 8,"full_name": "Caroll Orn Sr.","email": "joannarau@hegmann.io","__twitter_id": "ABC123"}},{"id": 2,"full_name": "Jerry Dickinson"}]`

  if b.String() != expected {
    t.Error("Does not match expected json")
@@ -238,7 +329,10 @@ func TestFilter2(t *testing.T) {
  value := `[{"id":1,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":100,"amount_refunded":0,"date":"01/01/2019","application":null,"billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}, {"id":2,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":0,"date":"02/18/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}},{"id":3,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":50,"date":"03/21/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}]`

  var b bytes.Buffer
  Filter(&b, []byte(value), []string{"id"})
  err := Filter(&b, []byte(value), []string{"id"})
  if err != nil {
    t.Error(err)
  }

  expected := `[{"id":1},{"id":2},{"id":3}]`

@@ -253,7 +347,7 @@ func TestStrip(t *testing.T) {

  expected := []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)

  if bytes.Equal(value1, expected) == false {
  if !bytes.Equal(value1, expected) {
    t.Log(value1)
    t.Error("[Valid path] Does not match expected json")
  }
@@ -261,7 +355,7 @@ func TestStrip(t *testing.T) {
  path2 := [][]byte{[]byte("boo"), []byte("hoo")}
  value2 := Strip([]byte(input3), path2)

  if bytes.Equal(value2, []byte(input3)) == false {
  if !bytes.Equal(value2, []byte(input3)) {
    t.Log(value2)
    t.Error("[Invalid path] Does not match expected json")
  }
@@ -300,7 +394,7 @@ func TestReplace(t *testing.T) {

  expected := `{ "users" : [{
  "id": 1,
  "full_name": "Sidney Stroman",
  "full_name": "Sidney St[1]roman",
  "email": "user0@demo.com",
  "__twitter_id": "2048666903444506956",
  "embed": {
@@ -332,7 +426,7 @@ func TestReplace(t *testing.T) {
func TestReplaceEmpty(t *testing.T) {
  var buf bytes.Buffer

  json := `{ "users" : [{"id":1,"full_name":"Sidney Stroman","email":"user0@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":2,"full_name":"Jerry Dickinson","email":"user1@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":3,"full_name":"Kenna Cassin","email":"user2@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":4,"full_name":"Mr. Pat Parisian","email":"rodney@kautzer.biz","__users_twitter_id":"2048666903444506956"}, {"id":5,"full_name":"Bette Ebert","email":"janeenrath@goyette.com","__users_twitter_id":"2048666903444506956"}, {"id":6,"full_name":"Everett Kiehn","email":"michael@bartoletti.com","__users_twitter_id":"2048666903444506956"}, {"id":7,"full_name":"Katrina Cronin","email":"loretaklocko@framivolkman.org","__users_twitter_id":"2048666903444506956"}, {"id":8,"full_name":"Caroll Orn Sr.","email":"joannarau@hegmann.io","__users_twitter_id":"2048666903444506956"}, {"id":9,"full_name":"Gwendolyn Ziemann","email":"renaytoy@rutherford.co","__users_twitter_id":"2048666903444506956"}, {"id":10,"full_name":"Mrs. Rosann Fritsch","email":"holliemosciski@thiel.org","__users_twitter_id":"2048666903444506956"}, {"id":11,"full_name":"Arden Koss","email":"cristobalankunding@howewelch.org","__users_twitter_id":"2048666903444506956"}, {"id":12,"full_name":"Brenton Bauch PhD","email":"renee@miller.co","__users_twitter_id":"2048666903444506956"}, {"id":13,"full_name":"Daine Gleichner","email":"andrea@nienow.co","__users_twitter_id":"2048666903444506956"}] }`
  json := `{ "users" : [{"id":1,"full_name":"Sidney St[1]roman","email":"user0@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":2,"full_name":"Jerry Dickinson","email":"user1@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":3,"full_name":"Kenna Cassin","email":"user2@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":4,"full_name":"Mr. Pat Parisian","email":"rodney@kautzer.biz","__users_twitter_id":"2048666903444506956"}, {"id":5,"full_name":"Bette Ebert","email":"janeenrath@goyette.com","__users_twitter_id":"2048666903444506956"}, {"id":6,"full_name":"Everett Kiehn","email":"michael@bartoletti.com","__users_twitter_id":"2048666903444506956"}, {"id":7,"full_name":"Katrina Cronin","email":"loretaklocko@framivolkman.org","__users_twitter_id":"2048666903444506956"}, {"id":8,"full_name":"Caroll Orn Sr.","email":"joannarau@hegmann.io","__users_twitter_id":"2048666903444506956"}, {"id":9,"full_name":"Gwendolyn Ziemann","email":"renaytoy@rutherford.co","__users_twitter_id":"2048666903444506956"}, {"id":10,"full_name":"Mrs. Rosann Fritsch","email":"holliemosciski@thiel.org","__users_twitter_id":"2048666903444506956"}, {"id":11,"full_name":"Arden Koss","email":"cristobalankunding@howewelch.org","__users_twitter_id":"2048666903444506956"}, {"id":12,"full_name":"Brenton Bauch PhD","email":"renee@miller.co","__users_twitter_id":"2048666903444506956"}, {"id":13,"full_name":"Daine Gleichner","email":"andrea@nienow.co","__users_twitter_id":"2048666903444506956"}] }`

  err := Replace(&buf, []byte(json), []Field{}, []Field{})
  if err != nil {
@@ -374,10 +468,6 @@ func TestKeys2(t *testing.T) {
    "id", "posts", "title", "description", "full_name", "email", "books", "name", "description",
  }

  // for i := range fields {
  //   fmt.Println("-->", string(fields[i]))
  // }

  if len(exp) != len(fields) {
    t.Errorf("Expected %d fields %d", len(exp), len(fields))
  }
@@ -393,7 +483,7 @@ func TestKeys3(t *testing.T) {
  json := `{
    "insert": {
      "created_at": "now",
      "test": { "type1": "a", "type2": "b" },
      "test_1a": { "type1": "a", "type2": "b" },
      "name": "Hello",
      "updated_at": "now",
      "description": "World"
@@ -404,7 +494,7 @@ func TestKeys3(t *testing.T) {
  fields := Keys([]byte(json))

  exp := []string{
    "insert", "created_at", "test", "type1", "type2", "name", "updated_at", "description",
    "insert", "created_at", "test_1a", "type1", "type2", "name", "updated_at", "description",
    "user",
  }
47
jsn/keys.go
@@ -10,14 +10,27 @@ func Keys(b []byte) [][]byte {

  st := NewStack()
  ae := 0
  instr := false
  slash := 0

  for i := 0; i < len(b); i++ {
    if instr && b[i] == '\\' {
      slash++
      continue
    }

    if b[i] == '"' && (slash%2 == 0) {
      instr = !instr
    }

    if state == expectObjClose || state == expectListClose {
      switch b[i] {
      case '{', '[':
        d++
      case '}', ']':
        d--
      if !instr {
        switch b[i] {
        case '{', '[':
          d++
        case '}', ']':
          d--
        }
      }
    }

@@ -46,7 +59,7 @@ func Keys(b []byte) [][]byte {
      state = expectKeyClose
      s = i

    case state == expectKeyClose && b[i] == '"':
    case state == expectKeyClose && (b[i] == '"' && (slash%2 == 0)):
      state = expectColon
      k = b[(s + 1):i]

@@ -57,7 +70,7 @@ func Keys(b []byte) [][]byte {
      state = expectString
      s = i

    case state == expectString && b[i] == '"':
    case state == expectString && (b[i] == '"' && (slash%2 == 0)):
      e = i

    case state == expectValue && b[i] == '{':
@@ -88,8 +101,7 @@ func Keys(b []byte) [][]byte {
    case state == expectNumClose &&
      ((b[i] < '0' || b[i] > '9') &&
        (b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
      i--
      e = i
      e = i - 1

    case state == expectValue &&
      (b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
@@ -101,8 +113,9 @@ func Keys(b []byte) [][]byte {

    case state == expectValue && b[i] == 'n':
      state = expectNull
      s = i

    case state == expectNull && b[i] == 'l':
    case state == expectNull && (b[i-1] == 'l' && b[i] == 'l'):
      e = i
    }

@@ -111,11 +124,25 @@ func Keys(b []byte) [][]byte {
      res = append(res, k)
    }

    if state == expectListClose {
    loop:
      for j := i + 1; j < len(b); j++ {
        switch b[j] {
        case ' ', '\t', '\n':
          continue
        case '{':
          break loop
        }
        i = e
        break loop
      }
    }
    state = expectKey
    k = nil
    e = 0
  }

  slash = 0
}

return res
@@ -12,12 +12,21 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
    return errors.New("'from' and 'to' must be of the same length")
  }

  if len(from) == 0 || len(to) == 0 {
    _, err := w.Write(b)
    return err
  }

  h := xxhash.New()
  tmap := make(map[uint64]int, len(from))

  for i, f := range from {
    h.Write(f.Key)
    h.Write(f.Value)
    if _, err := h.Write(f.Key); err != nil {
      return err
    }
    if _, err := h.Write(f.Value); err != nil {
      return err
    }

    tmap[h.Sum64()] = i
    h.Reset()
@@ -28,18 +37,32 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
  state := expectKey
  ws, we := -1, len(b)

  instr := false
  slash := 0

  for i := 0; i < len(b); i++ {
    if instr && b[i] == '\\' {
      slash++
      continue
    }

    // skip any left padding whitespace
    if ws == -1 && (b[i] == '{' || b[i] == '[') {
      ws = i
    }

    if b[i] == '"' && (slash%2 == 0) {
      instr = !instr
    }

    if state == expectObjClose || state == expectListClose {
      switch b[i] {
      case '{', '[':
        d++
      case '}', ']':
        d--
      if !instr {
        switch b[i] {
        case '{', '[':
          d++
        case '}', ']':
          d--
        }
      }
    }

@@ -48,9 +71,11 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
      state = expectKeyClose
      s = i

    case state == expectKeyClose && b[i] == '"':
    case state == expectKeyClose && (b[i] == '"' && (slash%2 == 0)):
      state = expectColon
      h.Write(b[(s + 1):i])
      if _, err := h.Write(b[(s + 1):i]); err != nil {
        return err
      }
      we = s

    case state == expectColon && b[i] == ':':
@@ -60,7 +85,7 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
      state = expectString
      s = i

    case state == expectString && b[i] == '"':
    case state == expectString && (b[i] == '"' && (slash%2 == 0)):
      e = i

    case state == expectValue && b[i] == '[':
@@ -86,8 +111,7 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
    case state == expectNumClose &&
      ((b[i] < '0' || b[i] > '9') &&
        (b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
      i--
      e = i
      e = i - 1

    case state == expectValue &&
      (b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
@@ -99,15 +123,18 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {

    case state == expectValue && b[i] == 'n':
      state = expectNull
      s = i

    case state == expectNull && b[i] == 'l':
    case state == expectNull && (b[i-1] == 'l' && b[i] == 'l'):
      e = i
    }

    if e != 0 {
      e++

      h.Write(b[s:e])
      if _, err := h.Write(b[s:e]); err != nil {
        return err
      }
      n, ok := tmap[h.Sum64()]
      h.Reset()

@@ -152,11 +179,13 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
      e = 0
      d = 0
    }

    slash = 0
  }

  if ws == -1 || (ws == 0 && we == len(b)) {
    w.Write(b)
  } else {
  } else if ws < we {
    w.Write(b[ws:we])
  }
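Replace works by fingerprinting: each `from` pair is hashed (key bytes, then value bytes) into a `map[uint64]int`, and every key/value the scanner passes is hashed the same way and looked up; the diff's additions here are the error checks on `Write` and the per-iteration `slash` reset. A minimal sketch of the fingerprint map, assuming the `github.com/cespare/xxhash/v2` package (the actual import isn't visible in this hunk):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

type field struct{ key, val []byte }

func main() {
	from := []field{{[]byte("__twitter_id"), []byte(`"ABC123"`)}}

	// Build the digest -> replacement-index table once.
	h := xxhash.New()
	tmap := make(map[uint64]int, len(from))
	for i, f := range from {
		h.Write(f.key)
		h.Write(f.val)
		tmap[h.Sum64()] = i
		h.Reset()
	}

	// While scanning, fingerprint each key/value pair the same way;
	// a hit means this field should be swapped for to[n].
	h.Write([]byte("__twitter_id"))
	h.Write([]byte(`"ABC123"`))
	n, ok := tmap[h.Sum64()]
	fmt.Println(n, ok) // 0 true
}
```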
35
jsn/strip.go
@@ -11,14 +11,27 @@ func Strip(b []byte, path [][]byte) []byte {
  pi := 0
  pm := false
  state := expectKey
  instr := false
  slash := 0

  for i := 0; i < len(b); i++ {
    if instr && b[i] == '\\' {
      slash++
      continue
    }

    if b[i] == '"' && (slash%2 == 0) {
      instr = !instr
    }

    if state == expectObjClose || state == expectListClose {
      switch b[i] {
      case '{', '[':
        d++
      case '}', ']':
        d--
      if !instr {
        switch b[i] {
        case '{', '[':
          d++
        case '}', ']':
          d--
        }
      }
    }

@@ -27,7 +40,7 @@ func Strip(b []byte, path [][]byte) []byte {
      state = expectKeyClose
      s = i

    case state == expectKeyClose && b[i] == '"':
    case state == expectKeyClose && (b[i] == '"' && (slash%2 == 0)):
      state = expectColon
      if pi == len(path) {
        pi = 0
@@ -44,7 +57,7 @@ func Strip(b []byte, path [][]byte) []byte {
      state = expectString
      s = i

    case state == expectString && b[i] == '"':
    case state == expectString && (b[i] == '"' && (slash%2 == 0)):
      e = i

    case state == expectValue && b[i] == '[':
@@ -70,8 +83,7 @@ func Strip(b []byte, path [][]byte) []byte {
    case state == expectNumClose &&
      ((b[i] < '0' || b[i] > '9') &&
        (b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
      i--
      e = i
      e = i - 1

    case state == expectValue &&
      (b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
@@ -83,8 +95,9 @@ func Strip(b []byte, path [][]byte) []byte {

    case state == expectValue && b[i] == 'n':
      state = expectNull
      s = i

    case state == expectNull && b[i] == 'l':
    case state == expectNull && (b[i-1] == 'l' && b[i] == 'l'):
      e = i
    }

@@ -101,6 +114,8 @@ func Strip(b []byte, path [][]byte) []byte {
      state = expectKey
      e = 0
    }

    slash = 0
  }

  return ob
37
jsn/test.go
Normal file
@@ -0,0 +1,37 @@
package jsn

import (
  "bytes"
  "errors"
)

func unifiedTest(data []byte) error {
  err1 := Validate(string(data))

  var b1 bytes.Buffer
  err2 := Filter(&b1, data, []string{"id", "full_name", "embed"})

  path1 := [][]byte{[]byte("data"), []byte("users")}
  Strip(data, path1)

  from := []Field{
    {[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
    {[]byte("__twitter_id"), []byte(`"ABC123"`)},
  }

  to := []Field{
    {[]byte("__twitter_id"), []byte(`"1234567890"`)},
    {[]byte("some_list"), []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)},
  }

  var b2 bytes.Buffer
  err3 := Replace(&b2, data, from, to)

  Keys(data)

  if err1 != nil || err2 != nil || err3 != nil {
    return errors.New("there was an error")
  }

  return nil
}
1
jsn/test7.json
Normal file
File diff suppressed because one or more lines are too long
7
jsn/test8.json
Normal file
@@ -0,0 +1,7 @@
{
  "data": {
    "slug": "javapackage-mainimport-nethttp-strings-githubcomgi-2786",
    "published": true,
    "body": "# \n\n```java\npackage main\n\nimport (\n \"net/http\"\n \"strings\"\n\n \"github.com/gin-gonic/gin\"\n)\n\nfunc main() {\n r := gin.Default()\n r.LoadHTMLGlob(\"templates/*\")\n\n r.GET(\"/\", handleIndex)\n r.GET(\"/to/:name\", handleIndex)\n r.Run()\n}\n\n// Hello is page data for the template\ntype Hello struct {\n Name string\n}\n\nfunc handleIndex(c *gin.Context) {\n name := c.Param(\"name\")\n if name != \"\" {\n name = strings.TrimPrefix(c.Param(\"name\"), \"/\")\n }\n c.HTML(http.StatusOK, \"hellofly.tmpl\", gin.H{\"Name\": name})\n}\n```\n\n\\"
  }
}
@@ -5,7 +5,7 @@ import (
  "encoding/json"
)

func Tree(v []byte) (map[string]interface{}, bool, error) {
func Tree(v []byte) (map[string]json.RawMessage, bool, error) {
  dec := json.NewDecoder(bytes.NewReader(v))
  array := false

@@ -25,7 +25,7 @@ func Tree(v []byte) (map[string]interface{}, bool, error) {
  }

  // while the array contains values
  var m map[string]interface{}
  var m map[string]json.RawMessage

  // decode an array value (Message)
  err := dec.Decode(&m)
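The `Tree` signature change swaps generic decoding for raw bytes: with `map[string]json.RawMessage` each value keeps its original textual form, which suits the mutation compiler since values are spliced back into generated SQL rather than inspected. A standalone illustration of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	in := []byte(`{"name": "Tomato", "price": 5.76}`)

	var m map[string]json.RawMessage
	if err := json.Unmarshal(in, &m); err != nil {
		panic(err)
	}

	// Each value is still its raw JSON text: no float64 round-trip for
	// price, no re-quoting of name when writing it back out.
	fmt.Printf("%s %s\n", m["name"], m["price"]) // "Tomato" 5.76
}
```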
@@ -2,7 +2,6 @@ package jsn

import (
  "fmt"
  "reflect"
  "strconv"
  "strings"
  "unsafe"
@@ -333,15 +332,6 @@ func b2s(b []byte) string {
  return *(*string)(unsafe.Pointer(&b))
}

func s2b(s string) []byte {
  strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
  var sh reflect.SliceHeader
  sh.Data = strh.Data
  sh.Len = strh.Len
  sh.Cap = strh.Len
  return *(*[]byte)(unsafe.Pointer(&sh))
}

const maxStartEndStringLen = 80

func startEndString(s string) string {
@@ -16,7 +16,7 @@ import (
  "github.com/pkg/errors"
)

var migrationPattern = regexp.MustCompile(`\A(\d+)_.+\.sql\z`)
var migrationPattern = regexp.MustCompile(`\A(\d+)_[^\.]+\.sql\z`)

var ErrNoFwMigration = errors.Errorf("no sql in forward migration step")

@@ -127,7 +127,7 @@ func FindMigrationsEx(path string, fs MigratorFS) ([]string, error) {
    return nil, err
  }

  mcount := len(paths) + 100
  mcount := len(paths)

  if n < int64(mcount) {
    return nil, fmt.Errorf("Duplicate migration %d", n)
@@ -244,7 +244,6 @@ func (m *Migrator) AppendMigration(name, upSQL, downSQL string) {
    UpSQL:   upSQL,
    DownSQL: downSQL,
  })
  return
}

// Migrate runs pending migrations
@@ -258,7 +257,7 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
  ctx := context.Background()
  // Lock to ensure multiple migrations cannot occur simultaneously
  lockNum := int64(9628173550095224) // arbitrary random number
  if _, lockErr := m.conn.Exec(ctx, "select pg_advisory_lock($1)", lockNum); lockErr != nil {
  if _, lockErr := m.conn.Exec(ctx, "select pg_try_advisory_lock($1)", lockNum); lockErr != nil {
    return lockErr
  }
  defer func() {
@@ -315,7 +314,7 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
    if err != nil {
      return err
    }
    defer tx.Rollback(ctx)
    defer tx.Rollback(ctx) //nolint: errcheck

    // Fire on start callback
    if m.OnStart != nil {
@@ -332,7 +331,9 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
    }

    // Reset all database connection settings. Important to do before updating version as search_path may have been changed.
    tx.Exec(ctx, "reset all")
    // if _, err := tx.Exec(ctx, "reset all"); err != nil {
    //   return err
    // }

    // Add one to the version
    _, err = tx.Exec(ctx, "update "+m.versionTable+" set version=$1", sequence)
@@ -352,16 +353,14 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
}

func (m *Migrator) GetCurrentVersion() (v int32, err error) {
  ctx := context.Background()
  err = m.conn.QueryRow(context.Background(),
    "select version from "+m.versionTable).Scan(&v)

  err = m.conn.QueryRow(ctx, "select version from "+m.versionTable).Scan(&v)
  return v, err
}

func (m *Migrator) ensureSchemaVersionTableExists() (err error) {
  ctx := context.Background()

  _, err = m.conn.Exec(ctx, fmt.Sprintf(`
  _, err = m.conn.Exec(context.Background(), fmt.Sprintf(`
    create table if not exists %s(version int4 not null);

    insert into %s(version)
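One behavioral note on the lock change: `pg_advisory_lock` blocks until the lock is free, while `pg_try_advisory_lock` returns at once with a boolean. The patch keeps the `Exec` call, so that boolean is discarded; a hedged sketch of reading it instead (my illustration, assuming the pgx-style `QueryRow` the surrounding code already uses):

```go
// Sketch only: surface the boolean that pg_try_advisory_lock returns,
// so a second concurrent migrator fails fast instead of proceeding.
var locked bool
if err := m.conn.QueryRow(ctx,
	"select pg_try_advisory_lock($1)", lockNum).Scan(&locked); err != nil {
	return err
}
if !locked {
	return errors.New("migration lock held by another process")
}
```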
216
psql/columns.go
Normal file
@@ -0,0 +1,216 @@
//nolint:errcheck
package psql

import (
  "errors"
  "io"
  "strings"

  "github.com/dosco/super-graph/qcode"
)

func (c *compilerContext) renderBaseColumns(
  sel *qcode.Select,
  ti *DBTableInfo,
  childCols []*qcode.Column,
  skipped uint32) ([]int, bool, error) {

  var realColsRendered []int

  colcount := (len(sel.Cols) + len(sel.OrderBy) + 1)
  colmap := make(map[string]struct{}, colcount)

  isSearch := sel.Args["search"] != nil
  isCursorPaged := sel.Paging.Type != qcode.PtOffset
  isAgg := false

  i := 0
  for n, col := range sel.Cols {
    cn := col.Name
    colmap[cn] = struct{}{}

    _, isRealCol := ti.ColMap[cn]

    if isRealCol {
      c.renderComma(i)
      realColsRendered = append(realColsRendered, n)
      colWithTable(c.w, ti.Name, cn)

    } else {
      switch {
      case isSearch && cn == "search_rank":
        if err := c.renderColumnSearchRank(sel, ti, col, i); err != nil {
          return nil, false, err
        }

      case isSearch && strings.HasPrefix(cn, "search_headline_"):
        if err := c.renderColumnSearchHeadline(sel, ti, col, i); err != nil {
          return nil, false, err
        }

      case cn == "__typename":
        if err := c.renderColumnTypename(sel, ti, col, i); err != nil {
          return nil, false, err
        }

      case strings.HasSuffix(cn, "_cursor"):
        continue

      default:
        if err := c.renderColumnFunction(sel, ti, col, i); err != nil {
          return nil, false, err
        }

        isAgg = true
      }
    }
    i++
  }

  if isCursorPaged {
    if _, ok := colmap[ti.PrimaryCol.Key]; !ok {
      colmap[ti.PrimaryCol.Key] = struct{}{}
      c.renderComma(i)
      colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
    }
    i++
  }

  for _, ob := range sel.OrderBy {
    if _, ok := colmap[ob.Col]; ok {
      continue
    }
    colmap[ob.Col] = struct{}{}
    c.renderComma(i)
    colWithTable(c.w, ti.Name, ob.Col)
    i++
  }

  for _, col := range childCols {
    if _, ok := colmap[col.Name]; ok {
      continue
    }
    c.renderComma(i)
    colWithTable(c.w, col.Table, col.Name)
    i++
  }

  return realColsRendered, isAgg, nil
}

func (c *compilerContext) renderColumnSearchRank(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
  if isColumnBlocked(sel, col.Name) {
    return nil
  }

  if ti.TSVCol == nil {
    return errors.New("no ts_vector column found")
  }
  cn := ti.TSVCol.Name
  arg := sel.Args["search"]

  c.renderComma(columnsRendered)
  //fmt.Fprintf(w, `ts_rank("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
  //c.sel.Name, cn, arg.Val, col.Name)
  io.WriteString(c.w, `ts_rank(`)
  colWithTable(c.w, ti.Name, cn)
  if c.schema.ver >= 110000 {
    io.WriteString(c.w, `, websearch_to_tsquery('{{`)
  } else {
    io.WriteString(c.w, `, to_tsquery('{{`)
  }
  io.WriteString(c.w, arg.Val)
  io.WriteString(c.w, `}}'))`)
  alias(c.w, col.Name)

  return nil
}

func (c *compilerContext) renderColumnSearchHeadline(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
  cn := col.Name[16:]

  if isColumnBlocked(sel, cn) {
    return nil
  }
  arg := sel.Args["search"]

  c.renderComma(columnsRendered)
  //fmt.Fprintf(w, `ts_headline("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
  //c.sel.Name, cn, arg.Val, col.Name)
  io.WriteString(c.w, `ts_headline(`)
  colWithTable(c.w, ti.Name, cn)
  if c.schema.ver >= 110000 {
    io.WriteString(c.w, `, websearch_to_tsquery('{{`)
  } else {
    io.WriteString(c.w, `, to_tsquery('{{`)
  }
  io.WriteString(c.w, arg.Val)
  io.WriteString(c.w, `}}'))`)
  alias(c.w, col.Name)

  return nil
}

func (c *compilerContext) renderColumnTypename(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
  if isColumnBlocked(sel, col.Name) {
    return nil
  }

  c.renderComma(columnsRendered)
  io.WriteString(c.w, `(`)
  squoted(c.w, ti.Name)
  io.WriteString(c.w, ` :: text)`)
  alias(c.w, col.Name)

  return nil
}

func (c *compilerContext) renderColumnFunction(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
  pl := funcPrefixLen(col.Name)
  // if pl == 0 {
  //   //fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
  //   io.WriteString(c.w, `'`)
  //   io.WriteString(c.w, col.Name)
  //   io.WriteString(c.w, ` not defined'`)
  //   alias(c.w, col.Name)
  // }

  if pl == 0 || !sel.Functions {
    return nil
  }

  cn := col.Name[pl:]

  if isColumnBlocked(sel, cn) {
    return nil
  }

  fn := col.Name[:pl-1]

  c.renderComma(columnsRendered)

  //fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Name, cn, col.Name)
  io.WriteString(c.w, fn)
  io.WriteString(c.w, `(`)
  colWithTable(c.w, ti.Name, cn)
  io.WriteString(c.w, `)`)
  alias(c.w, col.Name)

  return nil
}

func (c *compilerContext) renderComma(columnsRendered int) {
  if columnsRendered != 0 {
    io.WriteString(c.w, `, `)
  }
}

func isColumnBlocked(sel *qcode.Select, name string) bool {
  if len(sel.Allowed) != 0 {
    if _, ok := sel.Allowed[name]; !ok {
      return true
    }
  }
  return false
}
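For orientation, here is a sketch of what `renderColumnSearchRank` ends up writing, reconstructed from the writer calls above rather than copied from the diff; the table and column names are hypothetical:

```go
// Assuming table "products", tsvector column "tsv", a selected column
// named "search_rank", and search argument {{query}}, the calls above
// produce roughly:
//
//   ts_rank("products"."tsv", websearch_to_tsquery('{{query}}')) AS "search_rank"
//
// and on PostgreSQL older than 11 (schema version < 110000):
//
//   ts_rank("products"."tsv", to_tsquery('{{query}}')) AS "search_rank"
```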
54
psql/fuzz.go
Normal file
@@ -0,0 +1,54 @@
// +build gofuzz

package psql

import (
  "encoding/json"
  "github.com/dosco/super-graph/qcode"
)

var (
  qcompileTest, _ = qcode.NewCompiler(qcode.Config{})

  schema = getTestSchema()

  vars = NewVariables(map[string]string{
    "admin_account_id": "5",
  })

  pcompileTest = NewCompiler(Config{
    Schema: schema,
    Vars:   vars,
  })
)

// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
  gql := `mutation {
    product(insert: $data) {
      id
      name
      user {
        id
        full_name
        email
      }
    }
  }`

  qc, err := qcompileTest.Compile([]byte(gql), "user")
  if err != nil {
    panic("qcompile can't fail")
  }

  vars := map[string]json.RawMessage{
    "data": json.RawMessage(data),
  }

  _, _, err = pcompileTest.CompileEx(qc, vars)
  if err != nil {
    return 0
  }

  return 1
}
357
psql/insert.go
@@ -1,249 +1,196 @@
//nolint:errcheck
package psql

import (
  "bytes"
  "errors"
  "fmt"
  "io"

  "github.com/dosco/super-graph/jsn"
  "github.com/dosco/super-graph/qcode"
  "github.com/dosco/super-graph/util"
)

var zeroPaging = qcode.Paging{}
func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
  vars Variables, ti *DBTableInfo) (uint32, error) {

func (co *Compiler) compileMutation(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
  if len(qc.Selects) == 0 {
    return 0, errors.New("empty query")
  insert, ok := vars[qc.ActionVar]
  if !ok {
    return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
  }
  if len(insert) == 0 {
    return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
  }

  c := &compilerContext{w, qc.Selects, co}
  root := &qc.Selects[0]
  io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
  io.WriteString(c.w, qc.ActionVar)
  io.WriteString(c.w, `}}' :: json AS j)`)

  ti, err := c.schema.GetTable(root.Table)
  if err != nil {
    return 0, err
  st := util.NewStack()
  st.Push(kvitem{_type: itemInsert, key: ti.Name, val: insert, ti: ti})

  for {
    if st.Len() == 0 {
      break
    }
    if insert[0] == '[' && st.Len() > 1 {
      return 0, errors.New("Nested bulk insert not supported")
    }
    intf := st.Pop()

    switch item := intf.(type) {
    case kvitem:
      if err := c.handleKVItem(st, item); err != nil {
        return 0, err
      }

    case renitem:
      var err error

      // if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
      //   io.WriteString(c.w, ` WHERE false`)
      // }

      switch item._type {
      case itemInsert:
        err = c.renderInsertStmt(qc, w, item)
      case itemConnect:
        err = c.renderConnectStmt(qc, w, item)
      case itemUnion:
        err = c.renderUnionStmt(w, item)
      }

      if err != nil {
        return 0, err
      }
    }
  }
  io.WriteString(c.w, ` `)

  c.w.WriteString(`WITH `)
  quoted(c.w, ti.Name)
  c.w.WriteString(` AS `)

  switch root.Action {
  case qcode.ActionInsert:
    if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
      return 0, err
    }

  case qcode.ActionUpdate:
    if _, err := c.renderUpdate(qc, w, vars, ti); err != nil {
      return 0, err
    }

  case qcode.ActionUpsert:
    if _, err := c.renderUpsert(qc, w, vars, ti); err != nil {
      return 0, err
    }

  case qcode.ActionDelete:
    if _, err := c.renderDelete(qc, w, vars, ti); err != nil {
      return 0, err
    }

  default:
    return 0, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
  }

  io.WriteString(c.w, ` RETURNING *) `)

  root.Paging = zeroPaging
  root.DistinctOn = root.DistinctOn[:]
  root.OrderBy = root.OrderBy[:]
  root.Where = nil
  root.Args = nil

  return c.compileQuery(qc, w)
  return 0, nil
}

func (c *compilerContext) renderInsert(qc *qcode.QCode, w *bytes.Buffer,
  vars Variables, ti *DBTableInfo) (uint32, error) {
  root := &qc.Selects[0]
func (c *compilerContext) renderInsertStmt(qc *qcode.QCode, w io.Writer, item renitem) error {

  insert, ok := vars[root.ActionVar]
  if !ok {
    return 0, fmt.Errorf("Variable '%s' not defined", root.ActionVar)
  }
  ti := item.ti
  jt := item.data
  sk := nestedInsertRelColumnsMap(item.kvitem)

  jt, array, err := jsn.Tree(insert)
  if err != nil {
    return 0, err
  }
  io.WriteString(c.w, `, `)
  renderCteName(w, item.kvitem)
  io.WriteString(w, ` AS (`)

  c.w.WriteString(`(WITH "input" AS (SELECT {{`)
  c.w.WriteString(root.ActionVar)
  c.w.WriteString(`}}::json AS j) INSERT INTO `)
  quoted(c.w, ti.Name)
  io.WriteString(c.w, ` (`)
  c.renderInsertUpdateColumns(qc, w, jt, ti)
  io.WriteString(c.w, `)`)
  io.WriteString(w, `INSERT INTO `)
  quoted(w, ti.Name)
  io.WriteString(w, ` (`)
  renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
  renderNestedInsertRelColumns(w, item.kvitem, false)
  io.WriteString(w, `)`)

  c.w.WriteString(` SELECT `)
  c.renderInsertUpdateColumns(qc, w, jt, ti)
  c.w.WriteString(` FROM input i, `)
  io.WriteString(w, ` SELECT `)
  renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
  renderNestedInsertRelColumns(w, item.kvitem, true)

  if array {
    c.w.WriteString(`json_populate_recordset`)
  io.WriteString(w, ` FROM "_sg_input" i, `)
  renderNestedInsertRelTables(w, item.kvitem)

  if item.array {
    io.WriteString(w, `json_populate_recordset`)
  } else {
    c.w.WriteString(`json_populate_record`)
    io.WriteString(w, `json_populate_record`)
  }

  c.w.WriteString(`(NULL::`)
  c.w.WriteString(ti.Name)
  c.w.WriteString(`, i.j) t`)
  io.WriteString(w, `(NULL::`)
  io.WriteString(w, ti.Name)

  return 0, nil
}

func (c *compilerContext) renderInsertUpdateColumns(qc *qcode.QCode, w *bytes.Buffer,
  jt map[string]interface{}, ti *DBTableInfo) (uint32, error) {

  i := 0
  for _, cn := range ti.ColumnNames {
    if _, ok := jt[cn]; !ok {
      continue
    }
    if i != 0 {
      io.WriteString(c.w, `, `)
    }
    c.w.WriteString(cn)
    i++
  }

  return 0, nil
}

func (c *compilerContext) renderUpdate(qc *qcode.QCode, w *bytes.Buffer,
  vars Variables, ti *DBTableInfo) (uint32, error) {
  root := &qc.Selects[0]

  update, ok := vars[root.ActionVar]
  if !ok {
    return 0, fmt.Errorf("Variable '%s' not defined", root.ActionVar)
  }

  jt, array, err := jsn.Tree(update)
  if err != nil {
    return 0, err
  }

  c.w.WriteString(`(WITH "input" AS (SELECT {{`)
  c.w.WriteString(root.ActionVar)
  c.w.WriteString(`}}::json AS j) UPDATE `)
  quoted(c.w, ti.Name)
  io.WriteString(c.w, ` SET (`)
  c.renderInsertUpdateColumns(qc, w, jt, ti)

  c.w.WriteString(`) = (SELECT `)
  c.renderInsertUpdateColumns(qc, w, jt, ti)
  c.w.WriteString(` FROM input i, `)

  if array {
    c.w.WriteString(`json_populate_recordset`)
  if len(item.path) == 0 {
    io.WriteString(w, `, i.j) t RETURNING *)`)
  } else {
    c.w.WriteString(`json_populate_record`)
    io.WriteString(w, `, i.j->`)
    joinPath(w, item.path)
    io.WriteString(w, `) t RETURNING *)`)
  }

  c.w.WriteString(`(NULL::`)
  c.w.WriteString(ti.Name)
  c.w.WriteString(`, i.j) t)`)

  io.WriteString(c.w, ` WHERE `)

  if err := c.renderWhere(root, ti); err != nil {
    return 0, err
  }

  return 0, nil
  return nil
}

func (c *compilerContext) renderDelete(qc *qcode.QCode, w *bytes.Buffer,
  vars Variables, ti *DBTableInfo) (uint32, error) {
  root := &qc.Selects[0]
func nestedInsertRelColumnsMap(item kvitem) map[string]struct{} {
  sk := make(map[string]struct{}, len(item.items))

  c.w.WriteString(`(DELETE FROM `)
  quoted(c.w, ti.Name)
  io.WriteString(c.w, ` WHERE `)

  if err := c.renderWhere(root, ti); err != nil {
    return 0, err
  if len(item.items) == 0 {
    if item.relPC != nil && item.relPC.Type == RelOneToMany {
      sk[item.relPC.Right.Col] = struct{}{}
    }
  } else {
    for _, v := range item.items {
      if v.relCP.Type == RelOneToMany {
        sk[v.relCP.Right.Col] = struct{}{}
      }
    }
  }

  return 0, nil
  return sk
}

func (c *compilerContext) renderUpsert(qc *qcode.QCode, w *bytes.Buffer,
  vars Variables, ti *DBTableInfo) (uint32, error) {
  root := &qc.Selects[0]

  upsert, ok := vars[root.ActionVar]
  if !ok {
    return 0, fmt.Errorf("Variable '%s' not defined", root.ActionVar)
  }

  jt, _, err := jsn.Tree(upsert)
  if err != nil {
    return 0, err
  }

  if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
    return 0, err
  }

  c.w.WriteString(` ON CONFLICT DO (`)
  i := 0

  for _, cn := range ti.ColumnNames {
    if _, ok := jt[cn]; !ok {
      continue
func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
  if len(item.items) == 0 {
    if item.relPC != nil && item.relPC.Type == RelOneToMany {
      if values {
        colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
      } else {
        quoted(w, item.relPC.Right.Col)
      }
    }

    if col, ok := ti.Columns[cn]; !ok || !(col.UniqueKey || col.PrimaryKey) {
      continue
  } else {
    // Render child foreign key columns if child-to-parent
    // relationship is one-to-many
    i := 0
    for _, v := range item.items {
      if v.relCP.Type == RelOneToMany {
        if i != 0 {
          io.WriteString(w, `, `)
        }
        if values {
          if v._ctype > 0 {
            io.WriteString(w, `"_x_`)
            io.WriteString(w, v.relCP.Left.Table)
            io.WriteString(w, `".`)
            quoted(w, v.relCP.Left.Col)
          } else {
            colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
          }
        } else {
          quoted(w, v.relCP.Right.Col)
        }
        i++
      }
    }

    if i != 0 {
      io.WriteString(c.w, `, `)
    }
    c.w.WriteString(cn)
    i++
  }
  if i == 0 {
    c.w.WriteString(ti.PrimaryCol)
  }
  c.w.WriteString(`) DO `)

  c.w.WriteString(`UPDATE `)
  io.WriteString(c.w, ` SET `)

  i = 0
  for _, cn := range ti.ColumnNames {
    if _, ok := jt[cn]; !ok {
      continue
    }
    if i != 0 {
      io.WriteString(c.w, `, `)
    }
    c.w.WriteString(cn)
    io.WriteString(c.w, ` = EXCLUDED.`)
    c.w.WriteString(cn)
    i++
  }

  return 0, nil
  return nil
}

func quoted(w *bytes.Buffer, identifier string) {
  w.WriteString(`"`)
  w.WriteString(identifier)
  w.WriteString(`"`)
func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
  if len(item.items) == 0 {
    if item.relPC != nil && item.relPC.Type == RelOneToMany {
      quoted(w, item.relPC.Left.Table)
      io.WriteString(w, `, `)
    }
  } else {
    // Render tables needed to set values if child-to-parent
    // relationship is one-to-many
    for _, v := range item.items {
      if v.relCP.Type == RelOneToMany {
        if v._ctype > 0 {
          io.WriteString(w, `"_x_`)
          io.WriteString(w, v.relCP.Left.Table)
          io.WriteString(w, `", `)
        } else {
          quoted(w, v.relCP.Left.Table)
          io.WriteString(w, `, `)
        }
      }
    }
  }

  return nil
}
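The rewrite moves mutation rendering from one inline `WITH "input" ...` CTE per statement to a chain that starts with a shared `"_sg_input"` CTE and appends one named CTE per kvitem, so nested inserts become sibling CTEs wired together by the rendered relationship columns. Roughly, for a purchase with a nested customer and product (my sketch of the generated shape; the compiler tests below are the authority):

```go
// Approximate shape of the SQL the new renderer builds:
//
//   WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j),
//   "customers" AS (INSERT INTO "customers" (...) SELECT ...
//     FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t
//     RETURNING *),
//   "products" AS (INSERT INTO "products" (...) SELECT ...
//     FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t
//     RETURNING *),
//   "purchases" AS (INSERT INTO "purchases" (..., customer_id, product_id)
//     SELECT ..., "customers"."id", "products"."id"
//     FROM "_sg_input" i, "customers", "products",
//          json_populate_record(NULL::purchases, i.j) t
//     RETURNING *)
//   SELECT ...
```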
@ -12,173 +12,260 @@ func simpleInsert(t *testing.T) {
	}
}`

	sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO "users" (full_name, email) SELECT full_name, email FROM input i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id" FROM "users") AS "users_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "user")
}

func singleInsert(t *testing.T) {
	gql := `mutation {
		product(id: 15, insert: $insert) {
		product(id: $id, insert: $insert) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO "products" (name, description, user_id) SELECT name, description, user_id FROM input i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"insert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc", "user_id": 5 }`),
		"insert": json.RawMessage(` { "name": "my_name", "price": 6.95, "description": "my_desc", "user_id": 5 }`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "anon")
}

func bulkInsert(t *testing.T) {
	gql := `mutation {
		product(id: 15, insert: $insert) {
		product(name: "test", id: $id, insert: $insert) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO "products" (name, description) SELECT name, description FROM input i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"insert": json.RawMessage(` [{ "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }]`),
		"insert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "anon")
}

func singleUpsert(t *testing.T) {
func simpleInsertWithPresets(t *testing.T) {
	gql := `mutation {
		product(id: 15, upsert: $upsert) {
		product(insert: $data) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{upsert}}::json AS j) INSERT INTO "products" (name, description) SELECT name, description FROM input i, json_populate_record(NULL::products, i.j) t ON CONFLICT DO (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"upsert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
		"data": json.RawMessage(`{"name": "Tomato", "price": 5.76}`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "user")
}

func bulkUpsert(t *testing.T) {
func nestedInsertManyToMany(t *testing.T) {
	gql := `mutation {
		product(id: 15, upsert: $upsert) {
			id
			name
		purchase(insert: $data) {
			sale_type
			quantity
			due_date
			customer {
				id
				full_name
				email
			}
			product {
				id
				name
				price
			}
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{upsert}}::json AS j) INSERT INTO "products" (name, description) SELECT name, description FROM input i, json_populate_recordset(NULL::products, i.j) t ON CONFLICT DO (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"upsert": json.RawMessage(` [{ "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }]`),
		"data": json.RawMessage(` {
			"sale_type": "bought",
			"quantity": 5,
			"due_date": "now",
			"customer": {
				"email": "thedude@rug.com",
				"full_name": "The Dude"
			},
			"product": {
				"name": "Apple",
				"price": 1.25
			}
		}
	`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "admin")
}

func singleUpdate(t *testing.T) {
func nestedInsertOneToMany(t *testing.T) {
	gql := `mutation {
		product(id: 15, update: $update, where: { id: { eq: 1 } }) {
		user(insert: $data) {
			id
			name
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{update}}::json AS j) UPDATE "products" SET (name, description) = (SELECT name, description FROM input i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") = {{user_id}}) AND (("products"."id") = 1) AND (("products"."id") = 15) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"name": "Apple",
				"price": 1.25,
				"created_at": "now",
				"updated_at": "now"
			}
		}`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
	compileGQLToPSQL(t, gql, vars, "admin")
}

func delete(t *testing.T) {
func nestedInsertOneToOne(t *testing.T) {
	gql := `mutation {
		product(delete: true, where: { id: { eq: 1 } }) {
		product(insert: $data) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	sql := `WITH "products" AS (DELETE FROM "products" WHERE (("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 1) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`
	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"created_at": "now",
			"updated_at": "now",
			"user": {
				"hey": {
					"now": "what's the matter"
				},
				"email": "thedude@rug.com",
				"full_name": "The Dude",
				"created_at": "now",
				"updated_at": "now"
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedInsertOneToManyWithConnect(t *testing.T) {
	gql := `mutation {
		user(insert: $data) {
			id
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"connect": { "id": 5 }
			}
		}`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedInsertOneToOneWithConnect(t *testing.T) {
	gql := `mutation {
		product(insert: $data) {
			id
			name
			tags {
				id
				name
			}
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"created_at": "now",
			"updated_at": "now",
			"user": {
				"connect": { "id": 5 }
			}
		}`),
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedInsertOneToOneWithConnectArray(t *testing.T) {
	gql := `mutation {
		product(insert: $data) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"created_at": "now",
			"updated_at": "now",
			"user": {
				"connect": { "id": [1,2] }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func TestCompileInsert(t *testing.T) {
	t.Run("simpleInsert", simpleInsert)
	t.Run("singleInsert", singleInsert)
	t.Run("bulkInsert", bulkInsert)
	t.Run("singleUpdate", singleUpdate)
	t.Run("singleUpsert", singleUpsert)
	t.Run("bulkUpsert", bulkUpsert)

	t.Run("delete", delete)
	t.Run("simpleInsertWithPresets", simpleInsertWithPresets)
	t.Run("nestedInsertManyToMany", nestedInsertManyToMany)
	t.Run("nestedInsertOneToMany", nestedInsertOneToMany)
	t.Run("nestedInsertOneToOne", nestedInsertOneToOne)
	t.Run("nestedInsertOneToManyWithConnect", nestedInsertOneToManyWithConnect)
	t.Run("nestedInsertOneToOneWithConnect", nestedInsertOneToOneWithConnect)
	t.Run("nestedInsertOneToOneWithConnectArray", nestedInsertOneToOneWithConnectArray)
}
|
694
psql/mutate.go
Normal file
@ -0,0 +1,694 @@
//nolint:errcheck
package psql

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"

	"github.com/dosco/super-graph/jsn"
	"github.com/dosco/super-graph/qcode"
	"github.com/dosco/super-graph/util"
)

type itemType int

const (
	itemInsert itemType = iota + 1
	itemUpdate
	itemConnect
	itemDisconnect
	itemUnion
)

var insertTypes = map[string]itemType{
	"connect": itemConnect,
}

var updateTypes = map[string]itemType{
	"connect":    itemConnect,
	"disconnect": itemDisconnect,
}

var noLimit = qcode.Paging{NoLimit: true}

func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
	if len(qc.Selects) == 0 {
		return 0, errors.New("empty query")
	}

	c := &compilerContext{w, qc.Selects, co}
	root := &qc.Selects[0]

	ti, err := c.schema.GetTable(root.Name)
	if err != nil {
		return 0, err
	}

	switch qc.Type {
	case qcode.QTInsert:
		if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
			return 0, err
		}

	case qcode.QTUpdate:
		if _, err := c.renderUpdate(qc, w, vars, ti); err != nil {
			return 0, err
		}

	case qcode.QTUpsert:
		if _, err := c.renderUpsert(qc, w, vars, ti); err != nil {
			return 0, err
		}

	case qcode.QTDelete:
		if _, err := c.renderDelete(qc, w, vars, ti); err != nil {
			return 0, err
		}

	default:
		return 0, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
	}

	root.Paging = noLimit
	root.DistinctOn = root.DistinctOn[:]
	root.OrderBy = root.OrderBy[:]
	root.Where = nil
	root.Args = nil

	return c.compileQuery(qc, w, vars)
}

type kvitem struct {
	id     int32
	_type  itemType
	_ctype int
	key    string
	path   []string
	val    json.RawMessage
	data   map[string]json.RawMessage
	array  bool
	ti     *DBTableInfo
	relCP  *DBRel
	relPC  *DBRel
	items  []kvitem
}

type renitem struct {
	kvitem
	array bool
	data  map[string]json.RawMessage
}

// TODO: Handle cases where a column name matches the child table name;
// the child path needs to be excluded in the JSON sent to insert or update.

func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
	var data map[string]json.RawMessage
	var array bool
	var err error

	if item.data == nil {
		data, array, err = jsn.Tree(item.val)
		if err != nil {
			return err
		}
	} else {
		data, array = item.data, item.array
	}

	var unionize bool
	id := item.id + 1

	item.items = make([]kvitem, 0, len(data))

	for k, v := range data {
		if v[0] != '{' && v[0] != '[' {
			continue
		}

		// Get child-to-parent relationship
		relCP, err := c.schema.GetRel(k, item.key)
		if err != nil {
			var ty itemType
			var ok bool

			switch item._type {
			case itemInsert:
				ty, ok = insertTypes[k]
			case itemUpdate:
				ty, ok = updateTypes[k]
			}

			if ok {
				unionize = true
				item1 := item
				item1._type = ty
				item1.id = id
				item1.val = v

				item.items = append(item.items, item1)
				id++
			}

			// Get parent-to-child relationship
		} else if relPC, err := c.schema.GetRel(item.key, k); err == nil {
			ti, err := c.schema.GetTable(k)
			if err != nil {
				return err
			}

			item1 := kvitem{
				id:    id,
				_type: item._type,
				key:   k,
				val:   v,
				path:  append(item.path, k),
				ti:    ti,
				relCP: relCP,
				relPC: relPC,
			}

			if v[0] == '{' {
				item1.data, item1.array, err = jsn.Tree(v)
				if err != nil {
					return err
				}
				if v1, ok := item1.data["connect"]; ok && (v1[0] == '{' || v1[0] == '[') {
					item1._ctype |= (1 << itemConnect)
				}
				if v1, ok := item1.data["disconnect"]; ok && (v1[0] == '{' || v1[0] == '[') {
					item1._ctype |= (1 << itemDisconnect)
				}
			}

			item.items = append(item.items, item1)
			id++
		}
	}

	if unionize {
		item._type = itemUnion
	}

	// For inserts order the children according to
	// the creation order required by the parent-to-child
	// relationships. For example users need to be created
	// before the products they own.

	// For updates the order defined in the query must be
	// the order used.
	switch item._type {
	case itemInsert:
		for _, v := range item.items {
			if v.relPC.Type == RelOneToMany {
				st.Push(v)
			}
		}
		st.Push(renitem{kvitem: item, array: array, data: data})
		for _, v := range item.items {
			if v.relPC.Type == RelOneToOne {
				st.Push(v)
			}
		}

	case itemUpdate:
		for _, v := range item.items {
			if !(v._ctype > 0 && v.relPC.Type == RelOneToOne) {
				st.Push(v)
			}
		}
		st.Push(renitem{kvitem: item, array: array, data: data})
		for _, v := range item.items {
			if v._ctype > 0 && v.relPC.Type == RelOneToOne {
				st.Push(v)
			}
		}

	case itemUnion:
		st.Push(renitem{kvitem: item, array: array, data: data})
		for _, v := range item.items {
			st.Push(v)
		}

	default:
		for _, v := range item.items {
			st.Push(v)
		}
		st.Push(renitem{kvitem: item, array: array, data: data})
	}

	return nil
}
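The insert ordering above relies on the stack being LIFO: one-to-many children are pushed before the parent's renitem and one-to-one children after it, so one-to-one children render first, then the parent, then one-to-many children. A minimal sketch of that idea, using a plain slice as the stack (names and the oneToOne flag are illustrative stand-ins, not the real kvitem/Stack types):

```go
package main

import "fmt"

type child struct {
	name     string
	oneToOne bool // parent-to-child relationship type
}

func main() {
	parent := "users"
	children := []child{{"products", false}, {"avatar", true}}

	var st []string // LIFO: pushed last renders first

	for _, c := range children { // one-to-many pushed first => renders last
		if !c.oneToOne {
			st = append(st, c.name)
		}
	}
	st = append(st, parent)
	for _, c := range children { // one-to-one pushed last => renders first
		if c.oneToOne {
			st = append(st, c.name)
		}
	}

	for len(st) > 0 {
		fmt.Println(st[len(st)-1]) // avatar, users, products
		st = st[:len(st)-1]
	}
}
```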

func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
	var connect, disconnect bool

	// Render only for parent-to-child relationship of one-to-many
	if item.relPC.Type != RelOneToMany {
		return nil
	}

	for _, v := range item.items {
		if v._type == itemConnect {
			connect = true
		} else if v._type == itemDisconnect {
			disconnect = true
		}
		if connect && disconnect {
			break
		}
	}

	if connect {
		io.WriteString(w, `, `)
		if connect && disconnect {
			renderCteNameWithSuffix(w, item.kvitem, "c")
		} else {
			quoted(w, item.ti.Name)
		}
		io.WriteString(w, ` AS ( UPDATE `)
		quoted(w, item.ti.Name)
		io.WriteString(w, ` SET `)
		quoted(w, item.relPC.Right.Col)
		io.WriteString(w, ` = `)

		// When setting the id of the connected table in a one-to-many setting
		// we always overwrite the value including for array columns
		colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)

		io.WriteString(w, ` FROM `)
		quoted(w, item.relPC.Left.Table)
		io.WriteString(w, ` WHERE`)

		i := 0
		for _, v := range item.items {
			if v._type == itemConnect {
				if i != 0 {
					io.WriteString(w, ` OR (`)
				} else {
					io.WriteString(w, ` (`)
				}
				if err := renderWhereFromJSON(w, v, "connect", v.val); err != nil {
					return err
				}
				io.WriteString(w, `)`)
				i++
			}
		}
		io.WriteString(w, ` RETURNING `)
		quoted(w, item.ti.Name)
		io.WriteString(w, `.*)`)
	}

	if disconnect {
		io.WriteString(w, `, `)
		if connect && disconnect {
			renderCteNameWithSuffix(w, item.kvitem, "d")
		} else {
			quoted(w, item.ti.Name)
		}
		io.WriteString(w, ` AS ( UPDATE `)
		quoted(w, item.ti.Name)
		io.WriteString(w, ` SET `)
		quoted(w, item.relPC.Right.Col)
		io.WriteString(w, ` = `)

		if item.relPC.Right.Array {
			io.WriteString(w, ` array_remove(`)
			quoted(w, item.relPC.Right.Col)
			io.WriteString(w, `, `)
			colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
			io.WriteString(w, `)`)

		} else {
			io.WriteString(w, ` NULL`)
		}

		io.WriteString(w, ` FROM `)
		quoted(w, item.relPC.Left.Table)
		io.WriteString(w, ` WHERE`)

		i := 0
		for _, v := range item.items {
			if v._type == itemDisconnect {
				if i != 0 {
					io.WriteString(w, ` OR (`)
				} else {
					io.WriteString(w, ` (`)
				}
				if err := renderWhereFromJSON(w, v, "disconnect", v.val); err != nil {
					return err
				}
				io.WriteString(w, `)`)
				i++
			}
		}
		io.WriteString(w, ` RETURNING `)
		quoted(w, item.ti.Name)
		io.WriteString(w, `.*)`)
	}

	if connect && disconnect {
		io.WriteString(w, `, `)
		quoted(w, item.ti.Name)
		io.WriteString(w, ` AS (`)
		io.WriteString(w, `SELECT * FROM `)
		renderCteNameWithSuffix(w, item.kvitem, "c")
		io.WriteString(w, ` UNION ALL `)
		io.WriteString(w, `SELECT * FROM `)
		renderCteNameWithSuffix(w, item.kvitem, "d")
		io.WriteString(w, `)`)
	}

	return nil
}
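When a one-to-many update carries both connect and disconnect items, the function above emits two UPDATE CTEs suffixed `_c` and `_d` and a third CTE unioning them under the table's own name. Roughly this shape, hand-written for a hypothetical "tags" child of "products" (the `...` predicates stand for the renderWhereFromJSON output; this is an illustration, not compiler output):

```go
package main

import "fmt"

// Illustration of the CTE skeleton renderUnionStmt builds when both
// connect and disconnect items are present on a one-to-many child.
const shape = `, "tags_c" AS ( UPDATE "tags" SET "product_id" = "products"."id"` +
	` FROM "products" WHERE (...) RETURNING "tags".*)` +
	`, "tags_d" AS ( UPDATE "tags" SET "product_id" = NULL` +
	` FROM "products" WHERE (...) RETURNING "tags".*)` +
	`, "tags" AS (SELECT * FROM "tags_c" UNION ALL SELECT * FROM "tags_d")`

func main() { fmt.Println(shape) }
```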

func renderInsertUpdateColumns(w io.Writer,
	qc *qcode.QCode,
	jt map[string]json.RawMessage,
	ti *DBTableInfo,
	skipcols map[string]struct{},
	values bool) (uint32, error) {

	root := &qc.Selects[0]
	renderedCol := false

	n := 0
	for _, cn := range ti.Columns {
		if _, ok := skipcols[cn.Name]; ok {
			continue
		}
		if _, ok := jt[cn.Key]; !ok {
			continue
		}
		if _, ok := root.PresetMap[cn.Key]; ok {
			continue
		}
		if len(root.Allowed) != 0 {
			if _, ok := root.Allowed[cn.Key]; !ok {
				continue
			}
		}
		if n != 0 {
			io.WriteString(w, `, `)
		}

		if values {
			colWithTable(w, "t", cn.Name)
		} else {
			quoted(w, cn.Name)
		}

		if !renderedCol {
			renderedCol = true
		}
		n++
	}

	for i := range root.PresetList {
		cn := root.PresetList[i]
		col, ok := ti.ColMap[cn]
		if !ok {
			continue
		}
		if _, ok := skipcols[col.Name]; ok {
			continue
		}
		if i != 0 || n != 0 {
			io.WriteString(w, `, `)
		}

		if values {
			io.WriteString(w, `'`)
			io.WriteString(w, root.PresetMap[cn])
			io.WriteString(w, `' :: `)
			io.WriteString(w, col.Type)
		} else {
			quoted(w, cn)
		}

		if !renderedCol {
			renderedCol = true
		}
	}

	if len(skipcols) != 0 && renderedCol {
		io.WriteString(w, `, `)
	}
	return 0, nil
}

func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
	vars Variables, ti *DBTableInfo) (uint32, error) {
	root := &qc.Selects[0]

	upsert, ok := vars[qc.ActionVar]
	if !ok {
		return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
	}
	if len(upsert) == 0 {
		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	if ti.PrimaryCol == nil {
		return 0, fmt.Errorf("no primary key column found")
	}

	jt, _, err := jsn.Tree(upsert)
	if err != nil {
		return 0, err
	}

	if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
		return 0, err
	}

	io.WriteString(c.w, ` ON CONFLICT (`)
	i := 0

	for _, cn := range ti.Columns {
		if _, ok := jt[cn.Key]; !ok {
			continue
		}

		if col, ok := ti.ColMap[cn.Key]; !ok || !(col.UniqueKey || col.PrimaryKey) {
			continue
		}

		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		io.WriteString(c.w, cn.Name)
		i++
	}
	if i == 0 {
		io.WriteString(c.w, ti.PrimaryCol.Name)
	}
	io.WriteString(c.w, `)`)

	if root.Where != nil {
		io.WriteString(c.w, ` WHERE `)

		if err := c.renderWhere(root, ti); err != nil {
			return 0, err
		}
	}

	io.WriteString(c.w, ` DO UPDATE SET `)

	i = 0
	for _, cn := range ti.Columns {
		if _, ok := jt[cn.Key]; !ok {
			continue
		}
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		io.WriteString(c.w, cn.Name)
		io.WriteString(c.w, ` = EXCLUDED.`)
		io.WriteString(c.w, cn.Name)
		i++
	}

	io.WriteString(c.w, ` RETURNING *) `)

	return 0, nil
}
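The ON CONFLICT target above is built from the unique or primary-key columns that actually appear in the upsert payload, falling back to the table's primary key when none match. A self-contained sketch of that selection rule (the col struct and names here are stand-ins for DBColumn, used only for illustration):

```go
package main

import "fmt"

type col struct {
	name                  string
	uniqueKey, primaryKey bool
}

// conflictCols keeps unique/primary-key columns present in the payload;
// if none qualify it falls back to the primary key, mirroring renderUpsert.
func conflictCols(cols []col, payload map[string]bool, pk string) []string {
	var out []string
	for _, c := range cols {
		if payload[c.name] && (c.uniqueKey || c.primaryKey) {
			out = append(out, c.name)
		}
	}
	if len(out) == 0 {
		out = []string{pk}
	}
	return out
}

func main() {
	cols := []col{{"id", false, true}, {"email", true, false}, {"name", false, false}}
	payload := map[string]bool{"email": true, "name": true}
	fmt.Println(conflictCols(cols, payload, "id")) // [email]
}
```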

func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
	item renitem) error {

	rel := item.relPC

	// Render only for a parent-to-child relationship of one-to-one.
	// For this to work the child needs to be found first so its primary key
	// can be set in the related column on the parent object.
	// E.g. create a product and connect a user to it.
	if rel.Type != RelOneToOne {
		return nil
	}

	io.WriteString(w, `, "_x_`)
	io.WriteString(c.w, item.ti.Name)
	io.WriteString(c.w, `" AS (SELECT `)

	if rel.Left.Array {
		io.WriteString(w, `array_agg(DISTINCT `)
		quoted(w, rel.Right.Col)
		io.WriteString(w, `) AS `)
		quoted(w, rel.Right.Col)

	} else {
		quoted(w, rel.Right.Col)

	}

	io.WriteString(c.w, ` FROM "_sg_input" i,`)
	quoted(c.w, item.ti.Name)

	io.WriteString(c.w, ` WHERE `)
	if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
		return err
	}
	io.WriteString(c.w, ` LIMIT 1)`)

	return nil
}

func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
	item renitem) error {

	rel := item.relPC

	// Render only for a parent-to-child relationship of one-to-one.
	// For this to work the child needs to be found first so that a
	// null value can be set in the related column on the parent object.
	// E.g. update a product and disconnect the user from it.
	if rel.Type != RelOneToOne {
		return nil
	}
	io.WriteString(w, `, "_x_`)
	io.WriteString(c.w, item.ti.Name)
	io.WriteString(c.w, `" AS (`)

	if rel.Right.Array {
		io.WriteString(c.w, `SELECT `)
		quoted(w, rel.Right.Col)
		io.WriteString(c.w, ` FROM "_sg_input" i,`)
		quoted(c.w, item.ti.Name)
		io.WriteString(c.w, ` WHERE `)
		if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
			return err
		}
		io.WriteString(c.w, ` LIMIT 1))`)

	} else {
		io.WriteString(c.w, `SELECT * FROM (VALUES(NULL::`)
		io.WriteString(w, rel.Right.col.Type)
		io.WriteString(c.w, `)) AS LOOKUP(`)
		quoted(w, rel.Right.Col)
		io.WriteString(c.w, `))`)
	}

	return nil
}

func renderWhereFromJSON(w io.Writer, item kvitem, key string, val []byte) error {
	var kv map[string]json.RawMessage
	ti := item.ti

	if err := json.Unmarshal(val, &kv); err != nil {
		return err
	}
	i := 0
	for k, v := range kv {
		col, ok := ti.ColMap[k]
		if !ok {
			continue
		}
		if i != 0 {
			io.WriteString(w, ` AND `)
		}

		if v[0] == '[' {
			colWithTable(w, ti.Name, k)

			if col.Array {
				io.WriteString(w, ` && `)
			} else {
				io.WriteString(w, ` = `)
			}

			io.WriteString(w, `ANY((select a::`)
			io.WriteString(w, col.Type)

			io.WriteString(w, ` AS list from json_array_elements_text(`)
			renderPathJSON(w, item, key, k)
			io.WriteString(w, `::json) AS a))`)

		} else if col.Array {
			io.WriteString(w, `(`)
			renderPathJSON(w, item, key, k)
			io.WriteString(w, `)::`)
			io.WriteString(w, col.Type)

			io.WriteString(w, ` = ANY(`)
			colWithTable(w, ti.Name, k)
			io.WriteString(w, `)`)

		} else {
			colWithTable(w, ti.Name, k)

			io.WriteString(w, `= (`)
			renderPathJSON(w, item, key, k)
			io.WriteString(w, `)::`)
			io.WriteString(w, col.Type)
		}

		i++
	}
	return nil
}
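Three predicate shapes fall out of the branches above: a JSON list against an array column uses the `&&` overlap operator, a JSON list against a scalar column uses `= ANY(...)`, a scalar against an array column reverses the membership test, and scalar-vs-scalar is a plain cast and compare. Hand-written illustrations of the fragments, with `{path}` standing in for the `i.j->...->>` expression renderPathJSON emits (not compiler output):

```go
package main

import "fmt"

func main() {
	// JSON list vs array column: overlap operator.
	fmt.Println(`"products"."tag_ids" && ANY((select a::bigint AS list from json_array_elements_text({path}::json) AS a))`)
	// JSON list vs scalar column: membership test.
	fmt.Println(`"products"."id" = ANY((select a::bigint AS list from json_array_elements_text({path}::json) AS a))`)
	// Scalar vs array column: reversed membership test.
	fmt.Println(`({path})::bigint = ANY("products"."tag_ids")`)
	// Scalar vs scalar: cast and compare.
	fmt.Println(`"products"."id"= ({path})::bigint`)
}
```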

func renderPathJSON(w io.Writer, item kvitem, key1, key2 string) {
	io.WriteString(w, `(i.j->`)
	joinPath(w, item.path)
	io.WriteString(w, `->'`)
	io.WriteString(w, key1)
	io.WriteString(w, `'->>'`)
	io.WriteString(w, key2)
	io.WriteString(w, `')`)
}

func renderCteName(w io.Writer, item kvitem) error {
	io.WriteString(w, `"`)
	io.WriteString(w, item.ti.Name)
	if item._type == itemConnect || item._type == itemDisconnect {
		io.WriteString(w, `_`)
		int2string(w, item.id)
	}
	io.WriteString(w, `"`)
	return nil
}

func renderCteNameWithSuffix(w io.Writer, item kvitem, suffix string) error {
	io.WriteString(w, `"`)
	io.WriteString(w, item.ti.Name)
	io.WriteString(w, `_`)
	io.WriteString(w, suffix)
	io.WriteString(w, `"`)
	return nil
}

func joinPath(w io.Writer, path []string) {
	for i := range path {
		if i != 0 {
			io.WriteString(w, `->`)
		}
		io.WriteString(w, `'`)
		io.WriteString(w, path[i])
		io.WriteString(w, `'`)
	}
}
123
psql/mutate_test.go
Normal file
@ -0,0 +1,123 @@
package psql

import (
	"encoding/json"
	"testing"
)

func singleUpsert(t *testing.T) {
	gql := `mutation {
		product(upsert: $upsert) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func singleUpsertWhere(t *testing.T) {
	gql := `mutation {
		product(upsert: $upsert, where: { price : { gt: 3 } }) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func bulkUpsert(t *testing.T) {
	gql := `mutation {
		product(upsert: $upsert) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"upsert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func delete(t *testing.T) {
	gql := `mutation {
		product(delete: true, where: { id: { eq: 1 } }) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

// func blockedInsert(t *testing.T) {
// 	gql := `mutation {
// 		user(insert: $data) {
// 			id
// 		}
// 	}`

// 	sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`

// 	vars := map[string]json.RawMessage{
// 		"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
// 	}

// 	resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
// 	if err != nil {
// 		t.Fatal(err)
// 	}
// 	fmt.Println(string(resSQL))

// 	if string(resSQL) != sql {
// 		t.Fatal(errNotExpected)
// 	}
// }

// func blockedUpdate(t *testing.T) {
// 	gql := `mutation {
// 		user(where: { id: { lt: 5 } }, update: $data) {
// 			id
// 			email
// 		}
// 	}`

// 	sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`

// 	vars := map[string]json.RawMessage{
// 		"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
// 	}

// 	resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
// 	if err != nil {
// 		t.Fatal(err)
// 	}

// 	if string(resSQL) != sql {
// 		t.Fatal(errNotExpected)
// 	}
// }

func TestCompileMutate(t *testing.T) {
	t.Run("singleUpsert", singleUpsert)
	t.Run("singleUpsertWhere", singleUpsertWhere)
	t.Run("bulkUpsert", bulkUpsert)
	t.Run("delete", delete)
	// t.Run("blockedInsert", blockedInsert)
	// t.Run("blockedUpdate", blockedUpdate)
}
237
psql/psql_test.go
Normal file
@ -0,0 +1,237 @@
package psql

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"testing"

	"github.com/dosco/super-graph/qcode"
)

const (
	errNotExpected = "Generated SQL did not match what was expected"
	headerMarker   = "=== RUN"
	commentMarker  = "---"
)

var (
	qcompile *qcode.Compiler
	pcompile *Compiler
	expected map[string][]string
)

func TestMain(m *testing.M) {
	var err error

	qcompile, err = qcode.NewCompiler(qcode.Config{
		Blocklist: []string{
			"secret",
			"password",
			"token",
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("user", "product", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns: []string{"id", "name", "price", "users", "customers"},
			Filters: []string{
				"{ price: { gt: 0 } }",
				"{ price: { lt: 8 } }",
			},
		},
		Insert: qcode.InsertConfig{
			Presets: map[string]string{
				"user_id":    "$user_id",
				"created_at": "now",
				"updated_at": "now",
			},
		},
		Update: qcode.UpdateConfig{
			Filters: []string{"{ user_id: { eq: $user_id } }"},
			Presets: map[string]string{"updated_at": "now"},
		},
		Delete: qcode.DeleteConfig{
			Filters: []string{
				"{ price: { gt: 0 } }",
				"{ price: { lt: 8 } }",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("anon", "product", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns: []string{"id", "name"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("anon1", "product", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns:          []string{"id", "name", "price"},
			DisableFunctions: true,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("user", "users", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns: []string{"id", "full_name", "avatar", "email", "products"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Filters:          []string{"false"},
			DisableFunctions: true,
		},
		Insert: qcode.InsertConfig{
			Filters: []string{"false"},
		},
		Update: qcode.UpdateConfig{
			Filters: []string{"false"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("user", "mes", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns: []string{"id", "full_name", "avatar"},
			Filters: []string{
				"{ id: { eq: $user_id } }",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	err = qcompile.AddRole("user", "customers", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Columns: []string{"id", "email", "full_name", "products"},
		},
	})

	if err != nil {
		log.Fatal(err)
	}

	schema := getTestSchema()

	vars := NewVariables(map[string]string{
		"admin_account_id": "5",
	})

	pcompile = NewCompiler(Config{
		Schema: schema,
		Vars:   vars,
	})

	expected = make(map[string][]string)

	b, err := ioutil.ReadFile("tests.sql")
	if err != nil {
		log.Fatal(err)
	}
	text := string(b)
	lines := strings.Split(text, "\n")

	var h string

	for _, v := range lines {
		switch {
		case strings.HasPrefix(v, headerMarker):
			h = strings.TrimSpace(v[len(headerMarker):])

		case strings.HasPrefix(v, commentMarker):
			break

		default:
			v := strings.TrimSpace(v)
			if len(v) != 0 {
				expected[h] = append(expected[h], v)
			}
		}
	}
	os.Exit(m.Run())
}

func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
	generateTestFile := false

	if generateTestFile {
		var sqlStmts []string

		for i := 0; i < 100; i++ {
			qc, err := qcompile.Compile([]byte(gql), role)
			if err != nil {
				t.Fatal(err)
			}

			_, sqlB, err := pcompile.CompileEx(qc, vars)
			if err != nil {
				t.Fatal(err)
			}

			sql := string(sqlB)

			match := false
			for _, s := range sqlStmts {
				if sql == s {
					match = true
					break
				}
			}

			if !match {
				s := string(sql)
				sqlStmts = append(sqlStmts, s)
				fmt.Println(s)
			}
		}

		return
	}

	for i := 0; i < 200; i++ {
		qc, err := qcompile.Compile([]byte(gql), role)
		if err != nil {
			t.Fatal(err)
		}

		_, sqlStmt, err := pcompile.CompileEx(qc, vars)
		if err != nil {
			t.Fatal(err)
		}

		failed := true

		for _, sql := range expected[t.Name()] {
			if string(sqlStmt) == sql {
				failed = false
			}
		}

		if failed {
			fmt.Println(string(sqlStmt))
			t.Fatal(errNotExpected)
		}
	}
}
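The harness above keys expected SQL by t.Name(), so entries in tests.sql sit under `=== RUN` headers matching the subtest names, with `---` lines treated as comments. A minimal sketch of a matching fixture and how the same parsing loop groups it (the fixture contents are invented for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented fixture in the tests.sql format the harness expects.
	fixture := `=== RUN   TestCompileInsert/simpleInsert
--- an accepted SQL variant follows
WITH "users" AS (...) SELECT ...`

	expected := map[string][]string{}
	var h string
	for _, v := range strings.Split(fixture, "\n") {
		switch {
		case strings.HasPrefix(v, "=== RUN"):
			h = strings.TrimSpace(v[len("=== RUN"):]) // new test-name bucket
		case strings.HasPrefix(v, "---"):
			// comment line, skipped
		default:
			if v = strings.TrimSpace(v); v != "" {
				expected[h] = append(expected[h], v)
			}
		}
	}
	fmt.Println(expected["TestCompileInsert/simpleInsert"])
}
```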
1287
psql/query.go
Normal file
@ -0,0 +1,1287 @@
//nolint:errcheck
package psql

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/dosco/super-graph/qcode"
	"github.com/dosco/super-graph/util"
)

const (
	closeBlock = 500
)

var (
	ErrAllTablesSkipped = errors.New("all tables skipped. cannot render query")
)

type Variables map[string]json.RawMessage

type Config struct {
	Schema *DBSchema
	Vars   map[string]string
}

type Compiler struct {
	schema *DBSchema
	vars   map[string]string
}

func NewCompiler(conf Config) *Compiler {
	return &Compiler{
		schema: conf.Schema,
		vars:   conf.Vars,
	}
}

func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
	return c.schema.SetRel(child, parent, rel)
}

func (c *Compiler) IDColumn(table string) (*DBColumn, error) {
	ti, err := c.schema.GetTable(table)
	if err != nil {
		return nil, err
	}

	if ti.PrimaryCol == nil {
		return nil, fmt.Errorf("no primary key column found")
	}

	return ti.PrimaryCol, nil
}

type compilerContext struct {
	w io.Writer
	s []qcode.Select
	*Compiler
}

func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (uint32, []byte, error) {
	w := &bytes.Buffer{}
	skipped, err := co.Compile(qc, w, vars)
	return skipped, w.Bytes(), err
}

func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
	switch qc.Type {
	case qcode.QTQuery:
		return co.compileQuery(qc, w, vars)
	case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
		return co.compileMutation(qc, w, vars)
	}

	return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
}

func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
	if len(qc.Selects) == 0 {
		return 0, errors.New("empty query")
	}

	c := &compilerContext{w, qc.Selects, co}

	st := NewIntStack()
	i := 0

	io.WriteString(c.w, `SELECT json_build_object(`)
	for _, id := range qc.Roots {
		root := &qc.Selects[id]
		if root.SkipRender || len(root.Cols) == 0 {
			continue
		}

		st.Push(root.ID + closeBlock)
		st.Push(root.ID)

		if i != 0 {
			io.WriteString(c.w, `, `)
		}

		c.renderRootSelect(root)
		i++
	}

	io.WriteString(c.w, `) as "__root" FROM `)

	if i == 0 {
		return 0, ErrAllTablesSkipped
	}

	var ignored uint32

	for {
		if st.Len() == 0 {
			break
		}

		id := st.Pop()

		if id < closeBlock {
			sel := &c.s[id]

			if len(sel.Cols) == 0 {
				continue
			}

			ti, err := c.schema.GetTable(sel.Name)
			if err != nil {
				return 0, err
			}

			if sel.ParentID == -1 {
				io.WriteString(c.w, `(`)
			} else {
				c.renderLateralJoin(sel)
			}

			if !ti.Singular {
				c.renderPluralSelect(sel, ti)
			}

			skipped, err := c.renderSelect(sel, ti, vars)
			if err != nil {
				return 0, err
			}
			ignored |= skipped

			for _, cid := range sel.Children {
				if hasBit(skipped, uint32(cid)) {
					continue
				}
				child := &c.s[cid]
				if child.SkipRender {
					continue
				}

				st.Push(child.ID + closeBlock)
				st.Push(child.ID)
			}

		} else {
			sel := &c.s[(id - closeBlock)]

			ti, err := c.schema.GetTable(sel.Name)
			if err != nil {
				return 0, err
			}

			io.WriteString(c.w, `)`)
			aliasWithID(c.w, "__sr", sel.ID)

			io.WriteString(c.w, `)`)
			aliasWithID(c.w, "__sj", sel.ID)

			if !ti.Singular {
				io.WriteString(c.w, `)`)
				aliasWithID(c.w, "__sj", sel.ID)
			}

			if sel.ParentID == -1 {
				if st.Len() != 0 {
					io.WriteString(c.w, `, `)
				}
			} else {
				c.renderLateralJoinClose(sel)
			}

			if len(sel.Args) != 0 {
				i := 0
				for _, v := range sel.Args {
					qcode.FreeNode(v, 500)
					i++
				}
			}
		}
	}

	return ignored, nil
}
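compileQuery walks the select tree iteratively: each select ID is pushed twice, once as-is for the opening SQL and once offset by closeBlock (500) for the closing SQL, so the LIFO pop order brackets every child between its parent's open and close. A stripped-down sketch of the same traversal (the children map is an invented stand-in for sel.Children):

```go
package main

import "fmt"

const closeBlock = 500

func main() {
	// children[i] lists the child select IDs of select i (illustrative).
	children := map[int][]int{0: {1, 2}}

	st := []int{0 + closeBlock, 0}
	for len(st) > 0 {
		id := st[len(st)-1]
		st = st[:len(st)-1]

		if id < closeBlock {
			fmt.Printf("open %d\n", id)
			for _, c := range children[id] {
				// push close marker first so the child opens before it closes
				st = append(st, c+closeBlock, c)
			}
		} else {
			fmt.Printf("close %d\n", id-closeBlock)
		}
	}
	// Prints: open 0, open 2, close 2, open 1, close 1, close 0
}
```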

func (c *compilerContext) renderPluralSelect(sel *qcode.Select, ti *DBTableInfo) error {
	io.WriteString(c.w, `SELECT coalesce(json_agg("__sj_`)
	int2string(c.w, sel.ID)
	io.WriteString(c.w, `"."json"), '[]') as "json"`)

	if sel.Paging.Type != qcode.PtOffset {
		n := 0

		// check if primary key already included in order by
		// query argument
		for _, ob := range sel.OrderBy {
			if ob.Col == ti.PrimaryCol.Key {
				n = 1
				break
			}
		}

		if n == 1 {
			n = len(sel.OrderBy)
		} else {
			n = len(sel.OrderBy) + 1
		}

		io.WriteString(c.w, `, CONCAT_WS(','`)
		for i := 0; i < n; i++ {
			io.WriteString(c.w, `, max("__cur_`)
			int2string(c.w, int32(i))
			io.WriteString(c.w, `")`)
		}
		io.WriteString(c.w, `) as "cursor"`)
	}

	io.WriteString(c.w, ` FROM (`)
	return nil
}

func (c *compilerContext) renderRootSelect(sel *qcode.Select) error {
	io.WriteString(c.w, `'`)
	io.WriteString(c.w, sel.FieldName)
	io.WriteString(c.w, `', `)

	io.WriteString(c.w, `"__sj_`)
	int2string(c.w, sel.ID)
	io.WriteString(c.w, `"."json"`)

	if sel.Paging.Type != qcode.PtOffset {
		io.WriteString(c.w, `, '`)
		io.WriteString(c.w, sel.FieldName)
		io.WriteString(c.w, `_cursor', `)

		io.WriteString(c.w, `"__sj_`)
		int2string(c.w, sel.ID)
		io.WriteString(c.w, `"."cursor"`)
	}

	return nil
}

func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, []*qcode.Column, error) {
	var skipped uint32

	cols := make([]*qcode.Column, 0, len(sel.Cols))
	colmap := make(map[string]struct{}, len(sel.Cols))

	for i := range sel.Cols {
		colmap[sel.Cols[i].Name] = struct{}{}
	}

	for i := range sel.OrderBy {
		colmap[sel.OrderBy[i].Col] = struct{}{}
	}

	if sel.Paging.Type != qcode.PtOffset {
		colmap[ti.PrimaryCol.Key] = struct{}{}
		addPrimaryKey := true

		for _, ob := range sel.OrderBy {
			if ob.Col == ti.PrimaryCol.Key {
				addPrimaryKey = false
				break
			}
		}

		if addPrimaryKey {
			ob := &qcode.OrderBy{Col: ti.PrimaryCol.Name, Order: qcode.OrderAsc}

			if sel.Paging.Type == qcode.PtBackward {
				ob.Order = qcode.OrderDesc
			}
			sel.OrderBy = append(sel.OrderBy, ob)
		}
	}

	if sel.Paging.Cursor {
		c.addSeekPredicate(sel)
	}

	for _, id := range sel.Children {
		child := &c.s[id]

		rel, err := c.schema.GetRel(child.Name, ti.Name)
		if err != nil {
			return 0, nil, err
			//skipped |= (1 << uint(id))
			//continue
		}

		switch rel.Type {
		case RelOneToOne, RelOneToMany:
			if _, ok := colmap[rel.Right.Col]; !ok {
				cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Col, FieldName: rel.Right.Col})
				colmap[rel.Right.Col] = struct{}{}
			}

		case RelOneToManyThrough:
			if _, ok := colmap[rel.Left.Col]; !ok {
				cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
				colmap[rel.Left.Col] = struct{}{}
			}

		case RelEmbedded:
			if _, ok := colmap[rel.Left.Col]; !ok {
				cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
				colmap[rel.Left.Col] = struct{}{}
			}

		case RelRemote:
			if _, ok := colmap[rel.Left.Col]; !ok {
				cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
				colmap[rel.Left.Col] = struct{}{}
				skipped |= (1 << uint(id))
			}

		default:
			return 0, nil, fmt.Errorf("unknown relationship %s", rel)
			//skipped |= (1 << uint(id))
		}
	}

	return skipped, cols, nil
}

// This
// (A, B, C) >= (X, Y, Z)
//
// Becomes
// (A > X)
// OR ((A = X) AND (B > Y))
// OR ((A = X) AND (B = Y) AND (C > Z))
// OR ((A = X) AND (B = Y) AND (C = Z))

func (c *compilerContext) addSeekPredicate(sel *qcode.Select) error {
	var or, and *qcode.Exp

	obLen := len(sel.OrderBy)

	if obLen > 1 {
		or = qcode.NewFilter()
		or.Op = qcode.OpOr
	}

	for i := 0; i < obLen; i++ {
		if i > 0 {
			and = qcode.NewFilter()
			and.Op = qcode.OpAnd
		}

		for n, ob := range sel.OrderBy {
			f := qcode.NewFilter()
			f.Col = ob.Col
			f.Type = qcode.ValRef
			f.Table = "__cur"
			f.Val = ob.Col

			if obLen == 1 {
				qcode.AddFilter(sel, f)
				return nil
			}

			switch {
			case i > 0 && n != i:
				f.Op = qcode.OpEquals
			case ob.Order == qcode.OrderDesc:
				f.Op = qcode.OpLesserThan
			default:
				f.Op = qcode.OpGreaterThan
			}

			if and != nil {
				and.Children = append(and.Children, f)
			} else {
				or.Children = append(or.Children, f)
			}

			if n == i {
				break
			}
		}

		if and != nil {
			or.Children = append(or.Children, and)
		}
	}

	qcode.AddFilter(sel, or)
	return nil
}
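A compact way to check the expansion documented in the comment above: for an ORDER BY list like (A, B, C), the lexicographic row comparison (A, B, C) > (X, Y, Z) unrolls into ORed conjunctions where each disjunct fixes a prefix with equalities and compares the next column; addSeekPredicate's choice of OpGreaterThan vs OpLesserThan handles sort direction, and the all-equal disjunct is added for inclusive comparisons. A small generator for the textual form of the strict case, independent of the qcode types:

```go
package main

import (
	"fmt"
	"strings"
)

// seekPredicate expands lexicographic (cols) > (vals) into ORed conjunctions.
func seekPredicate(cols, vals []string) string {
	var ors []string
	for i := range cols {
		var ands []string
		for n := 0; n < i; n++ {
			ands = append(ands, fmt.Sprintf("(%s = %s)", cols[n], vals[n]))
		}
		ands = append(ands, fmt.Sprintf("(%s > %s)", cols[i], vals[i]))
		ors = append(ors, "("+strings.Join(ands, " AND ")+")")
	}
	return strings.Join(ors, " OR ")
}

func main() {
	fmt.Println(seekPredicate([]string{"A", "B", "C"}, []string{"X", "Y", "Z"}))
	// ((A > X)) OR ((A = X) AND (B > Y)) OR ((A = X) AND (B = Y) AND (C > Z))
}
```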

func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) (uint32, error) {
	var rel *DBRel
	var err error

	if sel.ParentID != -1 {
		parent := c.s[sel.ParentID]

		rel, err = c.schema.GetRel(ti.Name, parent.Name)
		if err != nil {
			return 0, err
		}
	}

	skipped, childCols, err := c.initSelect(sel, ti, vars)
	if err != nil {
		return 0, err
	}

	// SELECT
	// io.WriteString(c.w, `SELECT json_build_object(`)
	// if err := c.renderColumns(sel, ti, skipped); err != nil {
	// 	return 0, err
	// }

	io.WriteString(c.w, `SELECT row_to_json("__sr_`)
	int2string(c.w, sel.ID)
	io.WriteString(c.w, `") AS "json"`)

	if sel.Paging.Type != qcode.PtOffset {
		for i := range sel.OrderBy {
			io.WriteString(c.w, `, "__cur_`)
			int2string(c.w, int32(i))
			io.WriteString(c.w, `"`)
		}
	}

	io.WriteString(c.w, `FROM (SELECT `)

	if err := c.renderColumns(sel, ti, skipped); err != nil {
		return 0, err
	}

	if sel.Paging.Type != qcode.PtOffset {
		for i, ob := range sel.OrderBy {
			io.WriteString(c.w, `, LAST_VALUE(`)
			colWithTableID(c.w, ti.Name, sel.ID, ob.Col)
			io.WriteString(c.w, `) OVER() AS "__cur_`)
			int2string(c.w, int32(i))
			io.WriteString(c.w, `"`)
		}
	}

	io.WriteString(c.w, ` FROM (`)

	// FROM (SELECT .... )
	err = c.renderBaseSelect(sel, ti, rel, childCols, skipped)
	if err != nil {
		return skipped, err
	}

	//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
	io.WriteString(c.w, `)`)
	aliasWithID(c.w, ti.Name, sel.ID)

	// END-FROM

	return skipped, nil
}

func (c *compilerContext) renderLateralJoin(sel *qcode.Select) error {
	io.WriteString(c.w, ` LEFT OUTER JOIN LATERAL (`)
	return nil
}

func (c *compilerContext) renderLateralJoinClose(sel *qcode.Select) error {
	// io.WriteString(c.w, `) `)
	// aliasWithID(c.w, "__sj", sel.ID)
	io.WriteString(c.w, ` ON ('true')`)
	return nil
}

func (c *compilerContext) renderJoin(sel *qcode.Select, ti *DBTableInfo) error {
	parent := &c.s[sel.ParentID]
	return c.renderJoinByName(ti.Name, parent.Name, parent.ID)
}

func (c *compilerContext) renderJoinByName(table, parent string, id int32) error {
	rel, err := c.schema.GetRel(table, parent)
	if err != nil {
		return err
	}

	// This join is only required for one-to-many-through relations since
	// these make use of join tables that need to be pulled in.
	if rel.Type != RelOneToManyThrough {
		return err
	}

	pt, err := c.schema.GetTable(parent)
	if err != nil {
		return err
	}

	//fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
	//rel.Through, rel.Through, rel.ColT, c.parent.Name, c.parent.ID, rel.Left.Col)
	io.WriteString(c.w, ` LEFT OUTER JOIN "`)
	io.WriteString(c.w, rel.Through)
	io.WriteString(c.w, `" ON ((`)
	colWithTable(c.w, rel.Through, rel.ColT)
	io.WriteString(c.w, `) = (`)
	colWithTableID(c.w, pt.Name, id, rel.Left.Col)
	io.WriteString(c.w, `))`)

	return nil
}
|
||||
|
||||
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
|
||||
i := 0
|
||||
var cn string
|
||||
|
||||
for _, col := range sel.Cols {
|
||||
if n := funcPrefixLen(col.Name); n != 0 {
|
||||
if !sel.Functions {
|
||||
continue
|
||||
}
|
||||
cn = col.Name[n:]
|
||||
} else {
|
||||
cn = col.Name
|
||||
|
||||
if strings.HasSuffix(cn, "_cursor") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(sel.Allowed) != 0 {
|
||||
if _, ok := sel.Allowed[cn]; !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
|
||||
colWithTableID(c.w, ti.Name, sel.ID, col.Name)
|
||||
alias(c.w, col.FieldName)
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
i += c.renderRemoteRelColumns(sel, ti, i)
|
||||
|
||||
return c.renderJoinColumns(sel, ti, skipped, i)
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) int {
|
||||
i := colsRendered
|
||||
|
||||
for _, id := range sel.Children {
|
||||
child := &c.s[id]
|
||||
|
||||
rel, err := c.schema.GetRel(child.Name, sel.Name)
|
||||
if err != nil || rel.Type != RelRemote {
|
||||
continue
|
||||
}
|
||||
if i != 0 || len(sel.Cols) != 0 {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
|
||||
colWithTableID(c.w, ti.Name, sel.ID, rel.Left.Col)
|
||||
alias(c.w, rel.Right.Col)
|
||||
i++
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32, colsRendered int) error {
|
||||
// columns previously rendered
|
||||
i := colsRendered
|
||||
|
||||
for _, id := range sel.Children {
|
||||
if hasBit(skipped, uint32(id)) {
|
||||
continue
|
||||
}
|
||||
childSel := &c.s[id]
|
||||
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
|
||||
if childSel.SkipRender {
|
||||
io.WriteString(c.w, `NULL`)
|
||||
alias(c.w, childSel.FieldName)
|
||||
continue
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int2string(c.w, childSel.ID)
|
||||
io.WriteString(c.w, `"."json"`)
|
||||
alias(c.w, childSel.FieldName)
|
||||
|
||||
if childSel.Paging.Type != qcode.PtOffset {
|
||||
io.WriteString(c.w, `, "__sj_`)
|
||||
int2string(c.w, childSel.ID)
|
||||
io.WriteString(c.w, `"."cursor" AS "`)
|
||||
io.WriteString(c.w, childSel.FieldName)
|
||||
io.WriteString(c.w, `_cursor"`)
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
|
||||
childCols []*qcode.Column, skipped uint32) error {
|
||||
isRoot := (rel == nil)
|
||||
isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
|
||||
hasOrder := len(sel.OrderBy) != 0
|
||||
|
||||
if sel.Paging.Cursor {
|
||||
c.renderCursorCTE(sel)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `SELECT `)
|
||||
|
||||
if len(sel.DistinctOn) != 0 {
|
||||
c.renderDistinctOn(sel, ti)
|
||||
}
|
||||
|
||||
realColsRendered, isAgg, err := c.renderBaseColumns(sel, ti, childCols, skipped)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` FROM `)
|
||||
|
||||
c.renderFrom(sel, ti, rel)
|
||||
|
||||
if isRoot && isFil {
|
||||
io.WriteString(c.w, ` WHERE (`)
|
||||
if err := c.renderWhere(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(c.w, `)`)
|
||||
}
|
||||
|
||||
if !isRoot {
|
||||
if err := c.renderJoin(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` WHERE (`)
|
||||
if err := c.renderRelationship(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
if isFil {
|
||||
io.WriteString(c.w, ` AND `)
|
||||
if err := c.renderWhere(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
io.WriteString(c.w, `)`)
|
||||
}
|
||||
|
||||
if isAgg && len(realColsRendered) != 0 {
|
||||
io.WriteString(c.w, ` GROUP BY `)
|
||||
|
||||
for i, id := range realColsRendered {
|
||||
c.renderComma(i)
|
||||
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, c.sel.Cols[id].Name)
|
||||
colWithTable(c.w, ti.Name, sel.Cols[id].Name)
|
||||
}
|
||||
}
|
||||
|
||||
if hasOrder {
|
||||
if err := c.renderOrderBy(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case ti.Singular:
|
||||
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
|
||||
|
||||
case len(sel.Paging.Limit) != 0:
|
||||
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
|
||||
io.WriteString(c.w, ` LIMIT ('`)
|
||||
io.WriteString(c.w, sel.Paging.Limit)
|
||||
io.WriteString(c.w, `') :: integer`)
|
||||
|
||||
case sel.Paging.NoLimit:
|
||||
break
|
||||
|
||||
default:
|
||||
io.WriteString(c.w, ` LIMIT ('20') :: integer`)
|
||||
}
|
||||
|
||||
if len(sel.Paging.Offset) != 0 {
|
||||
//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
|
||||
io.WriteString(c.w, ` OFFSET ('`)
|
||||
io.WriteString(c.w, sel.Paging.Offset)
|
||||
io.WriteString(c.w, `') :: integer`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DBRel) error {
	if rel != nil && rel.Type == RelEmbedded {
		// json_to_recordset('[{"a":1,"b":[1,2,3],"c":"bar"}, {"a":2,"b":[1,2,3],"c":"bar"}]') as x(a int, b text, d text);

		io.WriteString(c.w, `"`)
		io.WriteString(c.w, rel.Left.Table)
		io.WriteString(c.w, `", `)

		io.WriteString(c.w, ti.Type)
		io.WriteString(c.w, `_to_recordset(`)
		colWithTable(c.w, rel.Left.Table, rel.Right.Col)
		io.WriteString(c.w, `) AS `)

		io.WriteString(c.w, `"`)
		io.WriteString(c.w, ti.Name)
		io.WriteString(c.w, `"`)

		io.WriteString(c.w, `(`)
		for i, col := range ti.Columns {
			if i != 0 {
				io.WriteString(c.w, `, `)
			}
			io.WriteString(c.w, col.Name)
			io.WriteString(c.w, ` `)
			io.WriteString(c.w, col.Type)
		}
		io.WriteString(c.w, `)`)

	} else {
		//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
		io.WriteString(c.w, `"`)
		io.WriteString(c.w, ti.Name)
		io.WriteString(c.w, `"`)
	}

	if sel.Paging.Cursor {
		io.WriteString(c.w, `, "__cur"`)
	}

	return nil
}

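When the relationship is RelEmbedded (a json or jsonb column exposed as a table), the FROM clause expands the column into a recordset instead of naming a real table. A sketch of the resulting SQL, assuming a hypothetical jsonb column tag_count on products with fields count and tags:

	FROM "products", jsonb_to_recordset("products"."tag_count") AS "tag_count"(count int, tags text)

ti.Type selects json_to_recordset or jsonb_to_recordset, and the trailing `, "__cur"` is appended only when cursor paging is active.
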
func (c *compilerContext) renderCursorCTE(sel *qcode.Select) error {
	io.WriteString(c.w, `WITH "__cur" AS (SELECT `)
	for i, ob := range sel.OrderBy {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		io.WriteString(c.w, `a[`)
		int2string(c.w, int32(i+1))
		io.WriteString(c.w, `] as `)
		quoted(c.w, ob.Col)
	}
	io.WriteString(c.w, ` FROM string_to_array('{{cursor}}', ',') as a) `)
	return nil
}

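The cursor CTE reverses the cursor encoding: it splits the opaque request cursor back into one named value per ORDER BY column so the keyset comparison can reference them like ordinary columns. Its output for an order on price then id (column names illustrative):

	WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array('{{cursor}}', ',') as a)

The {{cursor}} placeholder is replaced with the client-supplied cursor at request time.
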
func (c *compilerContext) renderRelationship(sel *qcode.Select, ti *DBTableInfo) error {
	parent := c.s[sel.ParentID]

	pti, err := c.schema.GetTable(parent.Name)
	if err != nil {
		return err
	}

	return c.renderRelationshipByName(ti.Name, pti.Name, parent.ID)
}

func (c *compilerContext) renderRelationshipByName(table, parent string, id int32) error {
	rel, err := c.schema.GetRel(table, parent)
	if err != nil {
		return err
	}

	io.WriteString(c.w, `((`)

	switch rel.Type {
	case RelOneToOne, RelOneToMany:

		//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
		//c.sel.Name, rel.Left.Col, c.parent.Name, c.parent.ID, rel.Right.Col)

		switch {
		case !rel.Left.Array && rel.Right.Array:
			colWithTable(c.w, table, rel.Left.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTableID(c.w, parent, id, rel.Right.Col)

		case rel.Left.Array && !rel.Right.Array:
			colWithTableID(c.w, parent, id, rel.Right.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, table, rel.Left.Col)

		default:
			colWithTable(c.w, table, rel.Left.Col)
			io.WriteString(c.w, `) = (`)
			colWithTableID(c.w, parent, id, rel.Right.Col)
		}

	case RelOneToManyThrough:
		// This requires the through table to be joined onto this select
		//fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
		//c.sel.Name, rel.Left.Col, rel.Through, rel.Right.Col)

		switch {
		case !rel.Left.Array && rel.Right.Array:
			colWithTable(c.w, table, rel.Left.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, rel.Through, rel.Right.Col)

		case rel.Left.Array && !rel.Right.Array:
			colWithTable(c.w, rel.Through, rel.Right.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, table, rel.Left.Col)

		default:
			colWithTable(c.w, table, rel.Left.Col)
			io.WriteString(c.w, `) = (`)
			colWithTable(c.w, rel.Through, rel.Right.Col)
		}

	case RelEmbedded:
		colWithTable(c.w, rel.Left.Table, rel.Left.Col)
		io.WriteString(c.w, `) = (`)
		colWithTableID(c.w, parent, id, rel.Left.Col)
	}

	io.WriteString(c.w, `))`)

	return nil
}

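The Left.Array/Right.Array flags pick between plain equality and Postgres any(...) over an array column. Sketches of the three shapes the one-to-many case renders (tables and columns illustrative):

	(("products"."user_id") = ("users_0"."id"))      -- neither side is an array
	(("tags"."id") = any ("products_0"."tag_ids"))   -- the parent row holds an array of child keys
	(("tags_0"."id") = any ("products"."tag_ids"))   -- the same array relationship, reverse direction
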
func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error {
	if sel.Where != nil {
		return c.renderExp(sel.Where, ti, false)
	}
	return nil
}

func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested bool) error {
	st := util.NewStack()
	st.Push(ex)

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()

		switch val := intf.(type) {
		case int32:
			switch val {
			case '(':
				io.WriteString(c.w, `(`)
			case ')':
				io.WriteString(c.w, `)`)
			}

		case qcode.ExpOp:
			switch val {
			case qcode.OpAnd:
				io.WriteString(c.w, ` AND `)
			case qcode.OpOr:
				io.WriteString(c.w, ` OR `)
			case qcode.OpNot:
				io.WriteString(c.w, `NOT `)
			case qcode.OpFalse:
				io.WriteString(c.w, `false`)
			default:
				return fmt.Errorf("11: unexpected value %v (%T)", intf, intf)
			}

		case *qcode.Exp:
			switch val.Op {
			case qcode.OpFalse:
				st.Push(val.Op)

			case qcode.OpAnd, qcode.OpOr:
				st.Push(')')
				for i := len(val.Children) - 1; i >= 0; i-- {
					st.Push(val.Children[i])
					if i > 0 {
						st.Push(val.Op)
					}
				}
				st.Push('(')

			case qcode.OpNot:
				//fmt.Printf("1> %s %d %s %s\n", val.Op, len(val.Children), val.Children[0].Op, val.Children[1].Op)

				st.Push(val.Children[0])
				st.Push(qcode.OpNot)

			default:
				if !skipNested && len(val.NestedCols) != 0 {
					io.WriteString(c.w, `EXISTS `)

					if err := c.renderNestedWhere(val, ti); err != nil {
						return err
					}

				} else {
					//fmt.Fprintf(w, `(("%s"."%s") `, c.sel.Name, val.Col)
					if err := c.renderOp(val, ti); err != nil {
						return err
					}
				}
			}
			//qcode.FreeExp(val)

		default:
			return fmt.Errorf("12: unexpected value %v (%T)", intf, intf)
		}
	}

	return nil
}

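renderExp walks the boolean expression tree without recursion: it pushes ')' first, then the children interleaved with the operator, then '(', so popping the stack writes a correctly parenthesized infix expression. A minimal self-contained sketch of the same technique with simplified types (not the diff's code):

package main

import (
	"bytes"
	"fmt"
)

// exp is a simplified stand-in for qcode.Exp; op is "" for a leaf value.
type exp struct {
	op       string
	val      string
	children []*exp
}

// render walks the tree with an explicit stack, pushing ')' first, then the
// children interleaved with the operator, then '(', exactly like renderExp.
func render(w *bytes.Buffer, root *exp) {
	st := []interface{}{root}
	for len(st) != 0 {
		top := st[len(st)-1]
		st = st[:len(st)-1]

		switch v := top.(type) {
		case rune:
			w.WriteRune(v)
		case string:
			w.WriteString(v)
		case *exp:
			if v.op == "" {
				w.WriteString(v.val)
				continue
			}
			st = append(st, ')')
			for i := len(v.children) - 1; i >= 0; i-- {
				st = append(st, v.children[i])
				if i > 0 {
					st = append(st, " "+v.op+" ")
				}
			}
			st = append(st, '(')
		}
	}
}

func main() {
	w := &bytes.Buffer{}
	render(w, &exp{op: "AND", children: []*exp{
		{val: "a = 1"},
		{op: "OR", children: []*exp{{val: "b = 2"}, {val: "c = 3"}}},
	}})
	fmt.Println(w.String()) // (a = 1 AND (b = 2 OR c = 3))
}
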
func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, ti *DBTableInfo) error {
	for i := 0; i < len(ex.NestedCols)-1; i++ {
		cti, err := c.schema.GetTable(ex.NestedCols[i])
		if err != nil {
			return err
		}

		if i != 0 {
			io.WriteString(c.w, ` AND `)
		}

		io.WriteString(c.w, `(SELECT 1 FROM `)
		io.WriteString(c.w, cti.Name)

		if err := c.renderJoinByName(cti.Name, ti.Name, -1); err != nil {
			return err
		}

		io.WriteString(c.w, ` WHERE `)

		if err := c.renderRelationshipByName(cti.Name, ti.Name, -1); err != nil {
			return err
		}

		io.WriteString(c.w, ` AND (`)

		if err := c.renderExp(ex, cti, true); err != nil {
			return err
		}

		io.WriteString(c.w, `)`)
	}

	for i := 0; i < len(ex.NestedCols)-1; i++ {
		io.WriteString(c.w, `)`)
	}

	return nil
}

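Each entry in NestedCols opens one `(SELECT 1 FROM ...` level and the second loop closes exactly as many parentheses as the first opened, balancing the EXISTS prefix written by renderExp. A sketch of a single level, for a filter like users(where: { products: { price: { gt: 3 } } }) (names illustrative):

	EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric)))
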
func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
	var col *DBColumn
	var ok bool

	if ex.Op == qcode.OpNop {
		return nil
	}

	if len(ex.Col) != 0 {
		if col, ok = ti.ColMap[ex.Col]; !ok {
			return fmt.Errorf("no column '%s' found", ex.Col)
		}

		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ex.Col)
		io.WriteString(c.w, `) `)
	}

	switch ex.Op {
	case qcode.OpEquals:
		io.WriteString(c.w, `=`)
	case qcode.OpNotEquals:
		io.WriteString(c.w, `!=`)
	case qcode.OpNotDistinct:
		io.WriteString(c.w, `IS NOT DISTINCT FROM`)
	case qcode.OpDistinct:
		io.WriteString(c.w, `IS DISTINCT FROM`)
	case qcode.OpGreaterOrEquals:
		io.WriteString(c.w, `>=`)
	case qcode.OpLesserOrEquals:
		io.WriteString(c.w, `<=`)
	case qcode.OpGreaterThan:
		io.WriteString(c.w, `>`)
	case qcode.OpLesserThan:
		io.WriteString(c.w, `<`)
	case qcode.OpIn:
		io.WriteString(c.w, `IN`)
	case qcode.OpNotIn:
		io.WriteString(c.w, `NOT IN`)
	case qcode.OpLike:
		io.WriteString(c.w, `LIKE`)
	case qcode.OpNotLike:
		io.WriteString(c.w, `NOT LIKE`)
	case qcode.OpILike:
		io.WriteString(c.w, `ILIKE`)
	case qcode.OpNotILike:
		io.WriteString(c.w, `NOT ILIKE`)
	case qcode.OpSimilar:
		io.WriteString(c.w, `SIMILAR TO`)
	case qcode.OpNotSimilar:
		io.WriteString(c.w, `NOT SIMILAR TO`)
	case qcode.OpContains:
		io.WriteString(c.w, `@>`)
	case qcode.OpContainedIn:
		io.WriteString(c.w, `<@`)
	case qcode.OpHasKey:
		io.WriteString(c.w, `?`)
	case qcode.OpHasKeyAny:
		io.WriteString(c.w, `?|`)
	case qcode.OpHasKeyAll:
		io.WriteString(c.w, `?&`)
	case qcode.OpIsNull:
		if strings.EqualFold(ex.Val, "true") {
			io.WriteString(c.w, `IS NULL)`)
		} else {
			io.WriteString(c.w, `IS NOT NULL)`)
		}
		return nil

	case qcode.OpEqID:
		if ti.PrimaryCol == nil {
			return fmt.Errorf("no primary key column defined for %s", ti.Name)
		}
		col = ti.PrimaryCol
		//fmt.Fprintf(w, `(("%s") =`, c.ti.PrimaryCol)
		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
		//io.WriteString(c.w, ti.PrimaryCol)
		io.WriteString(c.w, `) =`)

	case qcode.OpTsQuery:
		// guard the column actually dereferenced below
		if ti.TSVCol == nil {
			return fmt.Errorf("no tsv column defined for %s", ti.Name)
		}
		//fmt.Fprintf(w, `(("%s") @@ websearch_to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ti.TSVCol.Name)
		if c.schema.ver >= 110000 {
			io.WriteString(c.w, `) @@ websearch_to_tsquery('{{`)
		} else {
			io.WriteString(c.w, `) @@ to_tsquery('{{`)
		}
		io.WriteString(c.w, ex.Val)
		io.WriteString(c.w, `}}'))`)
		return nil

	default:
		return fmt.Errorf("[Where] unexpected op code %d", ex.Op)
	}

	switch {
	case ex.Type == qcode.ValList:
		c.renderList(ex)
	case col == nil:
		return errors.New("no column found for expression value")
	default:
		c.renderVal(ex, c.vars, col)
	}

	io.WriteString(c.w, `)`)
	return nil
}

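Most operators render as ((<table>.<col>) <op> <value> :: <type>); is_null and full-text search write their own right-hand side and return early. Sketches (names and values illustrative):

	(("products"."price") > '20' :: numeric)                  -- ordinary comparison via renderVal
	(("products"."deleted_at") IS NULL)                       -- OpIsNull short-circuits
	(("products"."tsv") @@ websearch_to_tsquery('{{query}}')) -- OpTsQuery on Postgres 11+ (ver >= 110000)
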
func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) error {
	io.WriteString(c.w, ` ORDER BY `)
	for i := range sel.OrderBy {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		ob := sel.OrderBy[i]
		colWithTable(c.w, ti.Name, ob.Col)

		switch ob.Order {
		case qcode.OrderAsc:
			io.WriteString(c.w, ` ASC`)
		case qcode.OrderDesc:
			io.WriteString(c.w, ` DESC`)
		case qcode.OrderAscNullsFirst:
			io.WriteString(c.w, ` ASC NULLS FIRST`)
		case qcode.OrderDescNullsFirst:
			io.WriteString(c.w, ` DESC NULLS FIRST`)
		case qcode.OrderAscNullsLast:
			io.WriteString(c.w, ` ASC NULLS LAST`)
		case qcode.OrderDescNullsLast:
			io.WriteString(c.w, ` DESC NULLS LAST`)
		default:
			return fmt.Errorf("13: unexpected value %v", ob.Order)
		}
	}
	return nil
}

func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
	io.WriteString(c.w, `DISTINCT ON (`)
	for i := range sel.DistinctOn {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		colWithTable(c.w, ti.Name, sel.DistinctOn[i])
	}
	io.WriteString(c.w, `) `)
}

func (c *compilerContext) renderList(ex *qcode.Exp) {
	io.WriteString(c.w, ` (`)
	for i := range ex.ListVal {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		switch ex.ListType {
		case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
			io.WriteString(c.w, ex.ListVal[i])
		case qcode.ValStr:
			io.WriteString(c.w, `'`)
			io.WriteString(c.w, ex.ListVal[i])
			io.WriteString(c.w, `'`)
		}
	}
	io.WriteString(c.w, `)`)
}

func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *DBColumn) {
	io.WriteString(c.w, ` `)

	switch ex.Type {
	case qcode.ValVar:
		val, ok := vars[ex.Val]
		switch {
		case ok && strings.HasPrefix(val, "sql:"):
			io.WriteString(c.w, ` (`)
			io.WriteString(c.w, val[4:])
			io.WriteString(c.w, `)`)
		case ok:
			squoted(c.w, val)
		default:
			io.WriteString(c.w, ` '{{`)
			io.WriteString(c.w, ex.Val)
			io.WriteString(c.w, `}}'`)
		}

	case qcode.ValRef:
		colWithTable(c.w, ex.Table, ex.Col)

	default:
		squoted(c.w, ex.Val)
	}

	io.WriteString(c.w, ` :: `)
	io.WriteString(c.w, col.Type)
}

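Variables resolve in three ways, in order: a config value prefixed with sql: is inlined as a parenthesized subexpression, any other config value becomes a quoted literal, and an unknown name falls back to a '{{name}}' placeholder substituted per request. Sketches, assuming a hypothetical vars config (values illustrative):

	(select id from users where sub = ...)  -- vars["user_id"] = "sql:select id from users where sub = ..."
	'5' :: bigint                           -- vars["account_id"] = "5"
	'{{product_id}}' :: bigint              -- name not present in vars
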
func funcPrefixLen(fn string) int {
	// Longer prefixes must be checked before their shorter variants,
	// otherwise "stddev_" would match "stddev_pop_*" and "stddev_samp_*"
	// first and strip the wrong prefix.
	switch {
	case strings.HasPrefix(fn, "avg_"):
		return 4
	case strings.HasPrefix(fn, "count_"):
		return 6
	case strings.HasPrefix(fn, "max_"):
		return 4
	case strings.HasPrefix(fn, "min_"):
		return 4
	case strings.HasPrefix(fn, "sum_"):
		return 4
	case strings.HasPrefix(fn, "stddev_pop_"):
		return 11
	case strings.HasPrefix(fn, "stddev_samp_"):
		return 12
	case strings.HasPrefix(fn, "stddev_"):
		return 7
	case strings.HasPrefix(fn, "variance_"):
		return 9
	case strings.HasPrefix(fn, "var_pop_"):
		return 8
	case strings.HasPrefix(fn, "var_samp_"):
		return 9
	}
	return 0
}

func hasBit(n uint32, pos uint32) bool {
	val := n & (1 << pos)
	return (val > 0)
}

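hasBit reads the skipped bitmap used throughout the compiler: bit N marks child select ID N as not renderable as a local join (remote relationships, unknown tables). A small in-package usage sketch; the function name is hypothetical:

func exampleHasBit() {
	var skipped uint32
	skipped |= 1 << uint(3) // child select ID 3 is handled elsewhere
	_ = hasBit(skipped, 3)  // true  -> skip it when rendering joined columns
	_ = hasBit(skipped, 2)  // false -> render it normally
}
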
func alias(w io.Writer, alias string) {
	io.WriteString(w, ` AS "`)
	io.WriteString(w, alias)
	io.WriteString(w, `"`)
}

func aliasWithID(w io.Writer, alias string, id int32) {
	io.WriteString(w, ` AS "`)
	io.WriteString(w, alias)
	io.WriteString(w, `_`)
	int2string(w, id)
	io.WriteString(w, `"`)
}

func colWithTable(w io.Writer, table, col string) {
	io.WriteString(w, `"`)
	io.WriteString(w, table)
	io.WriteString(w, `"."`)
	io.WriteString(w, col)
	io.WriteString(w, `"`)
}

func colWithTableID(w io.Writer, table string, id int32, col string) {
	io.WriteString(w, `"`)
	io.WriteString(w, table)
	if id >= 0 {
		io.WriteString(w, `_`)
		int2string(w, id)
	}
	io.WriteString(w, `"."`)
	io.WriteString(w, col)
	io.WriteString(w, `"`)
}

func quoted(w io.Writer, identifier string) {
	io.WriteString(w, `"`)
	io.WriteString(w, identifier)
	io.WriteString(w, `"`)
}

func squoted(w io.Writer, identifier string) {
	io.WriteString(w, `'`)
	io.WriteString(w, identifier)
	io.WriteString(w, `'`)
}

const charset = "0123456789"

func int2string(w io.Writer, val int32) {
	if val < 10 {
		w.Write([]byte{charset[val]})
		return
	}

	// Render the digits into a small buffer from the right. The original
	// digit-reversal trick dropped trailing zeros (20 rendered as "2").
	var buf [12]byte
	i := len(buf)
	for val > 0 {
		i--
		buf[i] = charset[val%10]
		val /= 10
	}
	w.Write(buf[i:])
}

459 psql/query_test.go Normal file
@ -0,0 +1,459 @@
package psql

import (
	"bytes"
	"encoding/json"
	"testing"
)

func withComplexArgs(t *testing.T) {
	gql := `query {
		proDUcts(
			# returns only 30 items
			limit: 30,

			# starts from item 10, commented out for now
			# offset: 10,

			# orders the response items by highest price
			order_by: { price: desc },

			# no duplicate prices returned
			distinct: [ price ]

			# only items with an id >= 20 and < 28 are returned
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
			id
			NAME
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereAndList(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: [
					{ not: { id: { is_null: true } } },
					{ price: { gt: 10 } },
				] } ) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereIsNull(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: {
					not: { id: { is_null: true } },
					price: { gt: 10 }
				}}) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereMultiOr(t *testing.T) {
	gql := `query {
		products(
			where: {
				or: {
					not: { id: { is_null: true } },
					price: { gt: 10 },
					price: { lt: 20 }
				} }
		) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func fetchByID(t *testing.T) {
	gql := `query {
		product(id: $id) {
			id
			name
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func searchQuery(t *testing.T) {
	gql := `query {
		products(search: $query) {
			id
			name
			search_rank
			search_headline_description
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func oneToMany(t *testing.T) {
	gql := `query {
		users {
			email
			products {
				name
				price
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func oneToManyReverse(t *testing.T) {
	gql := `query {
		products {
			name
			price
			users {
				email
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func oneToManyArray(t *testing.T) {
	gql := `
	query {
		product {
			name
			price
			tags {
				id
				name
			}
		}
		tags {
			name
			product {
				name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func manyToMany(t *testing.T) {
	gql := `query {
		products {
			name
			customers {
				email
				full_name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func manyToManyReverse(t *testing.T) {
	gql := `query {
		customers {
			email
			full_name
			products {
				name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func aggFunction(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func aggFunctionBlockedByCol(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func aggFunctionDisabled(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon1")
}

func aggFunctionWithFilter(t *testing.T) {
	gql := `query {
		products(where: { id: { gt: 10 } }) {
			id
			max_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func syntheticTables(t *testing.T) {
	gql := `query {
		me {
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func queryWithVariables(t *testing.T) {
	gql := `query {
		product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
			id
			name
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereOnRelations(t *testing.T) {
	gql := `query {
		users(where: {
			not: {
				products: {
					price: { gt: 3 }
				}
			}
		}) {
			id
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func multiRoot(t *testing.T) {
	gql := `query {
		product {
			id
			name
			customer {
				email
			}
			customers {
				email
			}
		}
		user {
			id
			email
		}
		customer {
			id
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withCursor(t *testing.T) {
	gql := `query {
		Products(
			first: 20
			after: $cursor
			order_by: { price: desc }) {
			Name
		}
	}`

	vars := map[string]json.RawMessage{
		"cursor": json.RawMessage(`"0,1"`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func jsonColumnAsTable(t *testing.T) {
	gql := `query {
		products {
			id
			name
			tag_count {
				count
				tags {
					name
				}
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func nullForAuthRequiredInAnon(t *testing.T) {
	gql := `query {
		products {
			id
			name
			user(where: { id: { eq: $user_id } }) {
				id
				email
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func blockedQuery(t *testing.T) {
	gql := `query {
		user(id: $id, where: { id: { gt: 3 } }) {
			id
			full_name
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "bad_dude")
}

func blockedFunctions(t *testing.T) {
	gql := `query {
		users {
			count_id
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "bad_dude")
}

func TestCompileQuery(t *testing.T) {
	t.Run("withComplexArgs", withComplexArgs)
	t.Run("withWhereAndList", withWhereAndList)
	t.Run("withWhereIsNull", withWhereIsNull)
	t.Run("withWhereMultiOr", withWhereMultiOr)
	t.Run("fetchByID", fetchByID)
	t.Run("searchQuery", searchQuery)
	t.Run("oneToMany", oneToMany)
	t.Run("oneToManyReverse", oneToManyReverse)
	t.Run("oneToManyArray", oneToManyArray)
	t.Run("manyToMany", manyToMany)
	t.Run("manyToManyReverse", manyToManyReverse)
	t.Run("aggFunction", aggFunction)
	t.Run("aggFunctionBlockedByCol", aggFunctionBlockedByCol)
	t.Run("aggFunctionDisabled", aggFunctionDisabled)
	t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
	t.Run("syntheticTables", syntheticTables)
	t.Run("queryWithVariables", queryWithVariables)
	t.Run("withWhereOnRelations", withWhereOnRelations)
	t.Run("multiRoot", multiRoot)
	t.Run("jsonColumnAsTable", jsonColumnAsTable)
	t.Run("withCursor", withCursor)
	t.Run("nullForAuthRequiredInAnon", nullForAuthRequiredInAnon)
	t.Run("blockedQuery", blockedQuery)
	t.Run("blockedFunctions", blockedFunctions)
}

var benchGQL = []byte(`query {
	proDUcts(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		NAME
		price
		user {
			full_name
			picture : avatar
		}
	}
}`)

func BenchmarkCompile(b *testing.B) {
	w := &bytes.Buffer{}

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		w.Reset()

		qc, err := qcompile.Compile(benchGQL, "user")
		if err != nil {
			b.Fatal(err)
		}

		_, err = pcompile.Compile(qc, w, nil)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkCompileParallel(b *testing.B) {
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		w := &bytes.Buffer{}

		for pb.Next() {
			w.Reset()

			qc, err := qcompile.Compile(benchGQL, "user")
			if err != nil {
				b.Fatal(err)
			}

			_, err = pcompile.Compile(qc, w, nil)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

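The tests and benchmarks above lean on package-level fixtures (compileGQLToPSQL, qcompile, pcompile) whose setup is outside this diff, likely in a TestMain. A hedged sketch of what such a harness could look like; the names come from the tests, the bodies are assumptions:

func compileGQLToPSQL(t *testing.T, gql string, vars Variables, role string) {
	qc, err := qcompile.Compile([]byte(gql), role)
	if err != nil {
		t.Fatal(err)
	}
	if _, _, err := pcompile.CompileEx(qc, vars); err != nil {
		t.Fatal(err)
	}
}
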
426 psql/schema.go Normal file
@ -0,0 +1,426 @@
package psql

import (
	"fmt"
	"strings"

	"github.com/gobuffalo/flect"
)

type DBSchema struct {
	ver int
	t   map[string]*DBTableInfo
	rm  map[string]map[string]*DBRel
}

type DBTableInfo struct {
	Name       string
	Type       string
	Singular   bool
	Columns    []DBColumn
	PrimaryCol *DBColumn
	TSVCol     *DBColumn
	ColMap     map[string]*DBColumn
	ColIDMap   map[int16]*DBColumn
}

type RelType int

const (
	RelOneToOne RelType = iota + 1
	RelOneToMany
	RelOneToManyThrough
	RelEmbedded
	RelRemote
)

type DBRel struct {
	Type    RelType
	Through string
	ColT    string
	Left    struct {
		col   *DBColumn
		Table string
		Col   string
		Array bool
	}
	Right struct {
		col   *DBColumn
		Table string
		Col   string
		Array bool
	}
}

func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
	schema := &DBSchema{
		t:  make(map[string]*DBTableInfo),
		rm: make(map[string]map[string]*DBRel),
	}

	for i, t := range info.Tables {
		err := schema.addTable(t, info.Columns[i], aliases)
		if err != nil {
			return nil, err
		}
	}

	for i, t := range info.Tables {
		err := schema.firstDegreeRels(t, info.Columns[i])
		if err != nil {
			return nil, err
		}
	}

	for i, t := range info.Tables {
		err := schema.secondDegreeRels(t, info.Columns[i])
		if err != nil {
			return nil, err
		}
	}

	return schema, nil
}

func (s *DBSchema) addTable(
	t DBTable, cols []DBColumn, aliases map[string][]string) error {

	colmap := make(map[string]*DBColumn, len(cols))
	colidmap := make(map[int16]*DBColumn, len(cols))

	singular := flect.Singularize(t.Key)
	s.t[singular] = &DBTableInfo{
		Name:     t.Name,
		Type:     t.Type,
		Singular: true,
		Columns:  cols,
		ColMap:   colmap,
		ColIDMap: colidmap,
	}

	plural := flect.Pluralize(t.Key)
	s.t[plural] = &DBTableInfo{
		Name:     t.Name,
		Type:     t.Type,
		Singular: false,
		Columns:  cols,
		ColMap:   colmap,
		ColIDMap: colidmap,
	}

	if al, ok := aliases[t.Key]; ok {
		for i := range al {
			k1 := flect.Singularize(al[i])
			s.t[k1] = s.t[singular]

			k2 := flect.Pluralize(al[i])
			s.t[k2] = s.t[plural]
		}
	}

	for i := range cols {
		c := &cols[i]

		switch {
		case c.Type == "tsvector":
			s.t[singular].TSVCol = c
			s.t[plural].TSVCol = c

		case c.PrimaryKey:
			s.t[singular].PrimaryCol = c
			s.t[plural].PrimaryCol = c
		}

		colmap[c.Key] = c
		colidmap[c.ID] = c
	}

	return nil
}

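Because the singular key, the plural key, and every alias all share the same *DBTableInfo values (including colmap and colidmap), a primary-key or tsvector assignment made once is visible under every spelling. A usage sketch with a hypothetical alias config; the "me" alias and the info value are illustrative:

schema, err := NewDBSchema(info, map[string][]string{
	"users": {"me"}, // expose the users table as a synthetic "me" table
})
if err != nil {
	return err
}
ti, _ := schema.GetTable("me") // same *DBTableInfo as GetTable("user")
_ = ti
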
func (s *DBSchema) firstDegreeRels(t DBTable, cols []DBColumn) error {
	ct := t.Key
	cti, ok := s.t[ct]
	if !ok {
		return fmt.Errorf("invalid foreign key table '%s'", ct)
	}

	for i := range cols {
		c := cols[i]

		if len(c.FKeyTable) == 0 {
			continue
		}

		// Foreign key table name
		ft := strings.ToLower(c.FKeyTable)

		ti, ok := s.t[ft]
		if !ok {
			return fmt.Errorf("invalid foreign key table '%s'", ft)
		}

		// This is an embedded relationship, like when a json/jsonb column
		// is exposed as a table
		if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
			rel := &DBRel{Type: RelEmbedded}
			rel.Left.col = cti.PrimaryCol
			rel.Left.Table = cti.Name
			rel.Left.Col = cti.PrimaryCol.Name

			rel.Right.col = &c
			rel.Right.Table = ti.Name
			rel.Right.Col = c.Name

			if err := s.SetRel(ft, ct, rel); err != nil {
				return err
			}
			continue
		}

		if len(c.FKeyColID) == 0 {
			continue
		}

		// Foreign key column id
		fcid := c.FKeyColID[0]

		fc, ok := ti.ColIDMap[fcid]
		if !ok {
			return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
				fcid, ti.Name)
		}

		var rel1, rel2 *DBRel

		// One-to-many relation between the current table and the
		// table in the foreign key
		if fc.UniqueKey {
			rel1 = &DBRel{Type: RelOneToOne}
		} else {
			rel1 = &DBRel{Type: RelOneToMany}
		}

		rel1.Left.col = &c
		rel1.Left.Table = t.Name
		rel1.Left.Col = c.Name
		rel1.Left.Array = c.Array

		rel1.Right.col = fc
		rel1.Right.Table = c.FKeyTable
		rel1.Right.Col = fc.Name
		rel1.Right.Array = fc.Array

		if err := s.SetRel(ct, ft, rel1); err != nil {
			return err
		}

		// One-to-many reverse relation between the foreign key table and
		// the current table
		if c.UniqueKey {
			rel2 = &DBRel{Type: RelOneToOne}
		} else {
			rel2 = &DBRel{Type: RelOneToMany}
		}

		rel2.Left.col = fc
		rel2.Left.Table = c.FKeyTable
		rel2.Left.Col = fc.Name
		rel2.Left.Array = fc.Array

		rel2.Right.col = &c
		rel2.Right.Table = t.Name
		rel2.Right.Col = c.Name
		rel2.Right.Array = c.Array

		if err := s.SetRel(ft, ct, rel2); err != nil {
			return err
		}
	}

	return nil
}

func (s *DBSchema) secondDegreeRels(t DBTable, cols []DBColumn) error {
	jcols := make([]DBColumn, 0, len(cols))
	ct := t.Key
	cti, ok := s.t[ct]
	if !ok {
		return fmt.Errorf("invalid foreign key table '%s'", ct)
	}

	for i := range cols {
		c := cols[i]

		if len(c.FKeyTable) == 0 {
			continue
		}

		// Foreign key table name
		ft := strings.ToLower(c.FKeyTable)

		ti, ok := s.t[ft]
		if !ok {
			return fmt.Errorf("invalid foreign key table '%s'", ft)
		}

		// This is an embedded relationship, like when a json/jsonb column
		// is exposed as a table
		if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
			continue
		}

		if len(c.FKeyColID) == 0 {
			continue
		}

		// Foreign key column id
		fcid := c.FKeyColID[0]

		if _, ok := ti.ColIDMap[fcid]; !ok {
			return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
				fcid, ti.Name)
		}

		jcols = append(jcols, c)
	}

	// If a table contains multiple foreign key columns it's a possible
	// join table for many-to-many relationships or multiple one-to-many
	// relations

	// The one-to-many relations below use the current table as the
	// join table, aka the through table.
	if len(jcols) > 1 {
		for i := range jcols {
			for n := range jcols {
				if n == i {
					continue
				}
				err := s.updateSchemaOTMT(cti, jcols[i], jcols[n])
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}

func (s *DBSchema) updateSchemaOTMT(
	ti *DBTableInfo, col1, col2 DBColumn) error {

	t1 := strings.ToLower(col1.FKeyTable)
	t2 := strings.ToLower(col2.FKeyTable)

	fc1, ok := s.t[t1].ColIDMap[col1.FKeyColID[0]]
	if !ok {
		return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
			col1.FKeyColID[0], ti.Name)
	}
	fc2, ok := s.t[t2].ColIDMap[col2.FKeyColID[0]]
	if !ok {
		return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
			col2.FKeyColID[0], ti.Name)
	}

	// One-to-many-through relation between the 1st foreign key table and
	// the 2nd foreign key table
	rel1 := &DBRel{Type: RelOneToManyThrough}
	rel1.Through = ti.Name
	rel1.ColT = col2.Name

	rel1.Left.col = &col2
	rel1.Left.Table = col2.FKeyTable
	rel1.Left.Col = fc2.Name

	rel1.Right.col = &col1
	rel1.Right.Table = ti.Name
	rel1.Right.Col = col1.Name

	if err := s.SetRel(t1, t2, rel1); err != nil {
		return err
	}

	// One-to-many-through relation between the 2nd foreign key table and
	// the 1st foreign key table
	rel2 := &DBRel{Type: RelOneToManyThrough}
	rel2.Through = ti.Name
	rel2.ColT = col1.Name

	rel2.Left.col = fc1
	rel2.Left.Table = col1.FKeyTable
	rel2.Left.Col = fc1.Name

	rel2.Right.col = &col2
	rel2.Right.Table = ti.Name
	rel2.Right.Col = col2.Name

	if err := s.SetRel(t2, t1, rel2); err != nil {
		return err
	}

	return nil
}

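Concretely, assuming a hypothetical join table purchases(customer_id -> customers.id, product_id -> products.id): secondDegreeRels calls updateSchemaOTMT once per ordered pair of its foreign keys, so both directions resolve to a through relation and the renderer joins via the purchases table:

	GetRel("customers", "products") -> RelOneToManyThrough, Through: "purchases"
	GetRel("products", "customers") -> RelOneToManyThrough, Through: "purchases"
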
func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
	t, ok := s.t[table]
	if !ok {
		return nil, fmt.Errorf("unknown table '%s'", table)
	}
	return t, nil
}

func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
	sp := strings.ToLower(flect.Singularize(parent))
	pp := strings.ToLower(flect.Pluralize(parent))

	sc := strings.ToLower(flect.Singularize(child))
	pc := strings.ToLower(flect.Pluralize(child))

	if _, ok := s.rm[sc]; !ok {
		s.rm[sc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[pc]; !ok {
		s.rm[pc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[sc][sp]; !ok {
		s.rm[sc][sp] = rel
	}
	if _, ok := s.rm[sc][pp]; !ok {
		s.rm[sc][pp] = rel
	}
	if _, ok := s.rm[pc][sp]; !ok {
		s.rm[pc][sp] = rel
	}
	if _, ok := s.rm[pc][pp]; !ok {
		s.rm[pc][pp] = rel
	}

	return nil
}

func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
	rel, ok := s.rm[child][parent]
	if !ok {
		// No relationship found, so fetch the table info
		// and try again in case child or parent was an alias
		ct, err := s.GetTable(child)
		if err != nil {
			return nil, err
		}
		pt, err := s.GetTable(parent)
		if err != nil {
			return nil, err
		}
		rel, ok = s.rm[ct.Name][pt.Name]
		if !ok {
			return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
				child, parent)
		}
	}
	return rel, nil
}
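SetRel registers all four singular/plural spellings of the pair, so GetRel normally hits on the first map lookup; the GetTable fallback only matters when a caller used an alias registered by addTable. A usage sketch, assuming a schema where products carries a user_id foreign key (names illustrative):

rel, err := schema.GetRel("products", "users") // any spelling of either name works
if err != nil {
	return err
}
fmt.Println(rel.Type == RelOneToMany) // true for a plain FK without a unique index
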
1066 psql/select.go
@ -1,1066 +0,0 @@
package psql

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"strings"

	"github.com/cespare/xxhash/v2"
	"github.com/dosco/super-graph/qcode"
	"github.com/dosco/super-graph/util"
)

const (
	empty      = ""
	closeBlock = 500
)

type Variables map[string]json.RawMessage

type Config struct {
	Schema *DBSchema
	Vars   map[string]string
}

type Compiler struct {
	schema *DBSchema
	vars   map[string]string
}

func NewCompiler(conf Config) *Compiler {
	return &Compiler{conf.Schema, conf.Vars}
}

func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
	return c.schema.SetRel(child, parent, rel)
}

func (c *Compiler) IDColumn(table string) (string, error) {
	t, err := c.schema.GetTable(table)
	if err != nil {
		return empty, err
	}

	return t.PrimaryCol, nil
}

type compilerContext struct {
	w *bytes.Buffer
	s []qcode.Select
	*Compiler
}

func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (uint32, []byte, error) {
	w := &bytes.Buffer{}
	skipped, err := co.Compile(qc, w, vars)
	return skipped, w.Bytes(), err
}

func (co *Compiler) Compile(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
	switch qc.Type {
	case qcode.QTQuery:
		return co.compileQuery(qc, w)
	case qcode.QTMutation:
		return co.compileMutation(qc, w, vars)
	}

	return 0, errors.New("unknown operation")
}

func (co *Compiler) compileQuery(qc *qcode.QCode, w *bytes.Buffer) (uint32, error) {
	if len(qc.Selects) == 0 {
		return 0, errors.New("empty query")
	}

	c := &compilerContext{w, qc.Selects, co}
	root := &qc.Selects[0]

	ti, err := c.schema.GetTable(root.Table)
	if err != nil {
		return 0, err
	}

	st := NewStack()
	st.Push(root.ID + closeBlock)
	st.Push(root.ID)

	//fmt.Fprintf(w, `SELECT json_object_agg('%s', %s) FROM (`,
	//root.FieldName, root.Table)
	c.w.WriteString(`SELECT json_object_agg('`)
	c.w.WriteString(root.FieldName)
	c.w.WriteString(`', `)

	if ti.Singular == false {
		c.w.WriteString(root.Table)
	} else {
		c.w.WriteString("sel_json_")
		int2string(c.w, root.ID)
	}
	c.w.WriteString(`) FROM (`)

	var ignored uint32

	for {
		if st.Len() == 0 {
			break
		}

		id := st.Pop()

		if id < closeBlock {
			sel := &c.s[id]

			ti, err := c.schema.GetTable(sel.Table)
			if err != nil {
				return 0, err
			}

			if sel.ID != 0 {
				if err = c.renderJoin(sel); err != nil {
					return 0, err
				}
			}
			skipped, err := c.renderSelect(sel, ti)
			if err != nil {
				return 0, err
			}
			ignored |= skipped

			for _, cid := range sel.Children {
				if hasBit(skipped, uint32(cid)) {
					continue
				}
				child := &c.s[cid]

				st.Push(child.ID + closeBlock)
				st.Push(child.ID)
			}

		} else {
			sel := &c.s[(id - closeBlock)]

			ti, err := c.schema.GetTable(sel.Table)
			if err != nil {
				return 0, err
			}

			err = c.renderSelectClose(sel, ti)
			if err != nil {
				return 0, err
			}

			if sel.ID != 0 {
				if err = c.renderJoinClose(sel); err != nil {
					return 0, err
				}
			}
		}
	}

	c.w.WriteString(`)`)
	alias(c.w, `done_1337`)
	c.w.WriteString(`;`)

	return ignored, nil
}

func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column) {
	var skipped uint32

	cols := make([]*qcode.Column, 0, len(sel.Cols))
	colmap := make(map[string]struct{}, len(sel.Cols))

	for i := range sel.Cols {
		colmap[sel.Cols[i].Name] = struct{}{}
	}

	for _, id := range sel.Children {
		child := &c.s[id]

		rel, err := c.schema.GetRel(child.Table, ti.Name)
		if err != nil {
			skipped |= (1 << uint(id))
			continue
		}

		switch rel.Type {
		case RelOneToMany:
			fallthrough
		case RelBelongTo:
			if _, ok := colmap[rel.Col2]; !ok {
				cols = append(cols, &qcode.Column{ti.Name, rel.Col2, rel.Col2})
			}
		case RelOneToManyThrough:
			if _, ok := colmap[rel.Col1]; !ok {
				cols = append(cols, &qcode.Column{ti.Name, rel.Col1, rel.Col1})
			}
		case RelRemote:
			if _, ok := colmap[rel.Col1]; !ok {
				cols = append(cols, &qcode.Column{ti.Name, rel.Col1, rel.Col2})
			}
			skipped |= (1 << uint(id))

		default:
			skipped |= (1 << uint(id))
		}
	}

	return skipped, cols
}

func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint32, error) {
	skipped, childCols := c.processChildren(sel, ti)
	hasOrder := len(sel.OrderBy) != 0

	// SELECT
	if ti.Singular == false {
		//fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, c.sel.Table)
		c.w.WriteString(`SELECT coalesce(json_agg("`)
		c.w.WriteString("sel_json_")
		int2string(c.w, sel.ID)
		c.w.WriteString(`"`)

		if hasOrder {
			err := c.renderOrderBy(sel, ti)
			if err != nil {
				return skipped, err
			}
		}

		//fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, c.sel.Table)
		c.w.WriteString(`), '[]')`)
		alias(c.w, sel.Table)
		c.w.WriteString(` FROM (`)
	}

	// ROW-TO-JSON
	c.w.WriteString(`SELECT `)

	if len(sel.DistinctOn) != 0 {
		c.renderDistinctOn(sel, ti)
	}

	c.w.WriteString(`row_to_json((`)

	//fmt.Fprintf(w, `SELECT "sel_%d" FROM (SELECT `, c.sel.ID)
	c.w.WriteString(`SELECT "sel_`)
	int2string(c.w, sel.ID)
	c.w.WriteString(`" FROM (SELECT `)

	// Combined column names
	c.renderColumns(sel, ti)

	c.renderRemoteRelColumns(sel, ti)

	err := c.renderJoinedColumns(sel, ti, skipped)
	if err != nil {
		return skipped, err
	}

	//fmt.Fprintf(w, `) AS "sel_%d"`, c.sel.ID)
	c.w.WriteString(`)`)
	aliasWithID(c.w, "sel", sel.ID)

	//fmt.Fprintf(w, `)) AS "%s"`, c.sel.Table)
	c.w.WriteString(`))`)
	aliasWithID(c.w, "sel_json", sel.ID)
	// END-ROW-TO-JSON

	if hasOrder {
		c.renderOrderByColumns(sel, ti)
	}
	// END-SELECT

	// FROM (SELECT .... )
	err = c.renderBaseSelect(sel, ti, childCols, skipped)
	if err != nil {
		return skipped, err
	}
	// END-FROM

	return skipped, nil
}

func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo) error {
	hasOrder := len(sel.OrderBy) != 0

	if hasOrder {
		err := c.renderOrderBy(sel, ti)
		if err != nil {
			return err
		}
	}

	if sel.Action == 0 {
		if len(sel.Paging.Limit) != 0 {
			//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
			c.w.WriteString(` LIMIT ('`)
			c.w.WriteString(sel.Paging.Limit)
			c.w.WriteString(`') :: integer`)

		} else if ti.Singular {
			c.w.WriteString(` LIMIT ('1') :: integer`)

		} else {
			c.w.WriteString(` LIMIT ('20') :: integer`)
		}
	}

	if len(sel.Paging.Offset) != 0 {
		//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
		c.w.WriteString(`OFFSET ('`)
		c.w.WriteString(sel.Paging.Offset)
		c.w.WriteString(`') :: integer`)
	}

	if ti.Singular == false {
		//fmt.Fprintf(w, `) AS "sel_json_agg_%d"`, c.sel.ID)
		c.w.WriteString(`)`)
		aliasWithID(c.w, "sel_json_agg", sel.ID)
	}

	return nil
}

func (c *compilerContext) renderJoin(sel *qcode.Select) error {
	c.w.WriteString(` LEFT OUTER JOIN LATERAL (`)
	return nil
}

func (c *compilerContext) renderJoinClose(sel *qcode.Select) error {
	//fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, c.sel.Table, c.sel.ID)
	c.w.WriteString(`)`)
	aliasWithIDSuffix(c.w, sel.Table, sel.ID, "_join")
	c.w.WriteString(` ON ('true')`)
	return nil
}

func (c *compilerContext) renderJoinTable(sel *qcode.Select) error {
	parent := &c.s[sel.ParentID]

	rel, err := c.schema.GetRel(sel.Table, parent.Table)
	if err != nil {
		return err
	}

	if rel.Type != RelOneToManyThrough {
		return err
	}

	pt, err := c.schema.GetTable(parent.Table)
	if err != nil {
		return err
	}

	//fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
	//rel.Through, rel.Through, rel.ColT, c.parent.Table, c.parent.ID, rel.Col1)
	c.w.WriteString(` LEFT OUTER JOIN "`)
	c.w.WriteString(rel.Through)
	c.w.WriteString(`" ON ((`)
	colWithTable(c.w, rel.Through, rel.ColT)
	c.w.WriteString(`) = (`)
	colWithTableID(c.w, pt.Name, parent.ID, rel.Col1)
	c.w.WriteString(`))`)

	return nil
}

func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) {
	for i, col := range sel.Cols {
		if i != 0 {
			io.WriteString(c.w, ", ")
		}
		//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
		//c.sel.Table, c.sel.ID, col.Name, col.FieldName)
		colWithTableIDAlias(c.w, ti.Name, sel.ID, col.Name, col.FieldName)
	}
}

func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo) {
	i := 0

	for _, id := range sel.Children {
		child := &c.s[id]

		rel, err := c.schema.GetRel(child.Table, sel.Table)
		if err != nil || rel.Type != RelRemote {
			continue
		}
		if i != 0 || len(sel.Cols) != 0 {
			io.WriteString(c.w, ", ")
		}
		//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
		//c.sel.Table, c.sel.ID, rel.Col1, rel.Col2)
		colWithTableID(c.w, ti.Name, sel.ID, rel.Col1)
		alias(c.w, rel.Col2)
		i++
	}
}

func (c *compilerContext) renderJoinedColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
	colsRendered := len(sel.Cols) != 0

	for _, id := range sel.Children {
		skipThis := hasBit(skipped, uint32(id))

		if colsRendered && !skipThis {
			io.WriteString(c.w, ", ")
		}
		if skipThis {
			continue
		}
		childSel := &c.s[id]

		//fmt.Fprintf(w, `"%s_%d_join"."%s" AS "%s"`,
		//s.Table, s.ID, s.Table, s.FieldName)
		colWithTableIDSuffixAlias(c.w, childSel.Table, childSel.ID,
			"_join", childSel.Table, childSel.FieldName)
	}

	return nil
}

func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
	childCols []*qcode.Column, skipped uint32) error {
	var groupBy []int

	isRoot := sel.ID == 0
	isFil := sel.Where != nil
	isSearch := sel.Args["search"] != nil
	isAgg := false

	c.w.WriteString(` FROM (SELECT `)

	for i, col := range sel.Cols {
		cn := col.Name

		_, isRealCol := ti.Columns[cn]

		if !isRealCol {
			if isSearch {
				switch {
				case cn == "search_rank":
					cn = ti.TSVCol
					arg := sel.Args["search"]

					//fmt.Fprintf(w, `ts_rank("%s"."%s", to_tsquery('%s')) AS %s`,
					//c.sel.Table, cn, arg.Val, col.Name)
					c.w.WriteString(`ts_rank(`)
					colWithTable(c.w, ti.Name, cn)
					c.w.WriteString(`, to_tsquery('`)
					c.w.WriteString(arg.Val)
					c.w.WriteString(`')`)
					alias(c.w, col.Name)

				case strings.HasPrefix(cn, "search_headline_"):
					cn = cn[16:]
					arg := sel.Args["search"]

					//fmt.Fprintf(w, `ts_headline("%s"."%s", to_tsquery('%s')) AS %s`,
					//c.sel.Table, cn, arg.Val, col.Name)
					c.w.WriteString(`ts_headlinek(`)
					colWithTable(c.w, ti.Name, cn)
					c.w.WriteString(`, to_tsquery('`)
					c.w.WriteString(arg.Val)
					c.w.WriteString(`')`)
					alias(c.w, col.Name)
				}
			} else {
				pl := funcPrefixLen(cn)
				if pl == 0 {
					//fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
					c.w.WriteString(`'`)
					c.w.WriteString(cn)
					c.w.WriteString(` not defined'`)
					alias(c.w, col.Name)
				} else {
					isAgg = true
					fn := cn[0 : pl-1]
					cn := cn[pl:]
					//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Table, cn, col.Name)
					c.w.WriteString(fn)
					c.w.WriteString(`(`)
					colWithTable(c.w, ti.Name, cn)
					c.w.WriteString(`)`)
					alias(c.w, col.Name)
				}
			}
		} else {
			groupBy = append(groupBy, i)
			//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, cn)
			colWithTable(c.w, ti.Name, cn)
		}

		if i < len(sel.Cols)-1 || len(childCols) != 0 {
			//io.WriteString(w, ", ")
			c.w.WriteString(`, `)
		}
	}

	for i, col := range childCols {
		if i != 0 {
			//io.WriteString(w, ", ")
			c.w.WriteString(`, `)
		}

		//fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)
		colWithTable(c.w, col.Table, col.Name)
	}

	c.w.WriteString(` FROM `)

	//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
	c.w.WriteString(`"`)
	c.w.WriteString(ti.Name)
	c.w.WriteString(`"`)

	// if tn, ok := c.tmap[sel.Table]; ok {
	// 	//fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Table)
	// 	tableWithAlias(c.w, ti.Name, sel.Table)
	// } else {
	// 	//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
	// 	c.w.WriteString(`"`)
	// 	c.w.WriteString(sel.Table)
	// 	c.w.WriteString(`"`)
	// }

	if isRoot && isFil {
		c.w.WriteString(` WHERE (`)
		if err := c.renderWhere(sel, ti); err != nil {
			return err
		}
		c.w.WriteString(`)`)
	}

	if !isRoot {
		if err := c.renderJoinTable(sel); err != nil {
			return err
		}

		c.w.WriteString(` WHERE (`)

		if err := c.renderRelationship(sel, ti); err != nil {
			return err
		}

		if isFil {
			c.w.WriteString(` AND `)
			if err := c.renderWhere(sel, ti); err != nil {
				return err
			}
		}
		c.w.WriteString(`)`)
	}

	if isAgg {
		if len(groupBy) != 0 {
			c.w.WriteString(` GROUP BY `)

			for i, id := range groupBy {
				if i != 0 {
					c.w.WriteString(`, `)
				}
				//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, c.sel.Cols[id].Name)
				colWithTable(c.w, ti.Name, sel.Cols[id].Name)
			}
		}
	}

	if sel.Action == 0 {
		if len(sel.Paging.Limit) != 0 {
			//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
			c.w.WriteString(` LIMIT ('`)
			c.w.WriteString(sel.Paging.Limit)
			c.w.WriteString(`') :: integer`)

		} else if ti.Singular {
			c.w.WriteString(` LIMIT ('1') :: integer`)

		} else {
			c.w.WriteString(` LIMIT ('20') :: integer`)
		}
	}

	if len(sel.Paging.Offset) != 0 {
		//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
		c.w.WriteString(` OFFSET ('`)
		c.w.WriteString(sel.Paging.Offset)
		c.w.WriteString(`') :: integer`)
	}

	//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Table, c.sel.ID)
	c.w.WriteString(`)`)
	aliasWithID(c.w, ti.Name, sel.ID)
	return nil
}

func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
	colsRendered := len(sel.Cols) != 0

	for i := range sel.OrderBy {
		if colsRendered {
			//io.WriteString(w, ", ")
			c.w.WriteString(`, `)
		}

		col := sel.OrderBy[i].Col
		//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d_%s_ob"`,
		//c.sel.Table, c.sel.ID, c,
		//c.sel.Table, c.sel.ID, c)
		colWithTableID(c.w, ti.Name, sel.ID, col)
		c.w.WriteString(` AS `)
		tableIDColSuffix(c.w, sel.Table, sel.ID, col, "_ob")
	}
}

func (c *compilerContext) renderRelationship(sel *qcode.Select, ti *DBTableInfo) error {
	parent := c.s[sel.ParentID]

	rel, err := c.schema.GetRel(sel.Table, parent.Table)
	if err != nil {
		return err
	}

	switch rel.Type {
	case RelBelongTo:
		//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
		//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
		c.w.WriteString(`((`)
		colWithTable(c.w, ti.Name, rel.Col1)
		c.w.WriteString(`) = (`)
		colWithTableID(c.w, parent.Table, parent.ID, rel.Col2)
		c.w.WriteString(`))`)

	case RelOneToMany:
		//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
		//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
		c.w.WriteString(`((`)
		colWithTable(c.w, ti.Name, rel.Col1)
		c.w.WriteString(`) = (`)
		colWithTableID(c.w, parent.Table, parent.ID, rel.Col2)
		c.w.WriteString(`))`)

	case RelOneToManyThrough:
		//fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
		//c.sel.Table, rel.Col1, rel.Through, rel.Col2)
		c.w.WriteString(`((`)
		colWithTable(c.w, ti.Name, rel.Col1)
		c.w.WriteString(`) = (`)
		colWithTable(c.w, rel.Through, rel.Col2)
		c.w.WriteString(`))`)
	}

	return nil
}

func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error {
	st := util.NewStack()

	if sel.Where != nil {
		st.Push(sel.Where)
	}

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()

		switch val := intf.(type) {
		case qcode.ExpOp:
			switch val {
			case qcode.OpAnd:
				c.w.WriteString(` AND `)
			case qcode.OpOr:
				c.w.WriteString(` OR `)
			case qcode.OpNot:
				c.w.WriteString(`NOT `)
			default:
				return fmt.Errorf("11: unexpected value %v (%t)", intf, intf)
			}

		case *qcode.Exp:
			switch val.Op {
			case qcode.OpAnd, qcode.OpOr:
				for i := len(val.Children) - 1; i >= 0; i-- {
					st.Push(val.Children[i])
					if i > 0 {
						st.Push(val.Op)
					}
				}
				qcode.FreeExp(val)

			case qcode.OpNot:
				st.Push(val.Children[0])
				st.Push(qcode.OpNot)
				qcode.FreeExp(val)

			default:
				if val.NestedCol {
					//fmt.Fprintf(w, `(("%s") `, val.Col)
					c.w.WriteString(`(("`)
					c.w.WriteString(val.Col)
					c.w.WriteString(`") `)

				} else if len(val.Col) != 0 {
					//fmt.Fprintf(w, `(("%s"."%s") `, c.sel.Table, val.Col)
					c.w.WriteString(`((`)
					colWithTable(c.w, ti.Name, val.Col)
					c.w.WriteString(`) `)
				}
				valExists := true

				switch val.Op {
				case qcode.OpEquals:
					c.w.WriteString(`=`)
				case qcode.OpNotEquals:
					c.w.WriteString(`!=`)
				case qcode.OpGreaterOrEquals:
					c.w.WriteString(`>=`)
				case qcode.OpLesserOrEquals:
					c.w.WriteString(`<=`)
				case qcode.OpGreaterThan:
					c.w.WriteString(`>`)
				case qcode.OpLesserThan:
					c.w.WriteString(`<`)
				case qcode.OpIn:
					c.w.WriteString(`IN`)
				case qcode.OpNotIn:
					c.w.WriteString(`NOT IN`)
				case qcode.OpLike:
					c.w.WriteString(`LIKE`)
				case qcode.OpNotLike:
					c.w.WriteString(`NOT LIKE`)
				case qcode.OpILike:
					c.w.WriteString(`ILIKE`)
				case qcode.OpNotILike:
					c.w.WriteString(`NOT ILIKE`)
				case qcode.OpSimilar:
					c.w.WriteString(`SIMILAR TO`)
				case qcode.OpNotSimilar:
					c.w.WriteString(`NOT SIMILAR TO`)
				case qcode.OpContains:
					c.w.WriteString(`@>`)
				case qcode.OpContainedIn:
					c.w.WriteString(`<@`)
				case qcode.OpHasKey:
					c.w.WriteString(`?`)
				case qcode.OpHasKeyAny:
					c.w.WriteString(`?|`)
				case qcode.OpHasKeyAll:
					c.w.WriteString(`?&`)
				case qcode.OpIsNull:
					if strings.EqualFold(val.Val, "true") {
						c.w.WriteString(`IS NULL)`)
					} else {
						c.w.WriteString(`IS NOT NULL)`)
					}
					valExists = false
				case qcode.OpEqID:
					if len(ti.PrimaryCol) == 0 {
						return fmt.Errorf("no primary key column defined for %s", ti.Name)
					}
					//fmt.Fprintf(w, `(("%s") =`, c.ti.PrimaryCol)
					c.w.WriteString(`((`)
					colWithTable(c.w, ti.Name, ti.PrimaryCol)
					//c.w.WriteString(ti.PrimaryCol)
					c.w.WriteString(`) =`)
				case qcode.OpTsQuery:
					if len(ti.TSVCol) == 0 {
						return fmt.Errorf("no tsv column defined for %s", ti.Name)
					}
					//fmt.Fprintf(w, `(("%s") @@ to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
					c.w.WriteString(`(("`)
					c.w.WriteString(ti.TSVCol)
					c.w.WriteString(`") @@ to_tsquery('`)
					c.w.WriteString(val.Val)
					c.w.WriteString(`'))`)
					valExists = false

				default:
					return fmt.Errorf("[Where] unexpected op code %d", val.Op)
				}

				if valExists {
					if val.Type == qcode.ValList {
						c.renderList(val)
					} else {
						c.renderVal(val, c.vars)
					}
					c.w.WriteString(`)`)
				}

				qcode.FreeExp(val)
			}

		default:
			return fmt.Errorf("12: unexpected value %v (%t)", intf, intf)
		}
	}

	return nil
}

func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) error {
	c.w.WriteString(` ORDER BY `)
	for i := range sel.OrderBy {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		ob := sel.OrderBy[i]

		switch ob.Order {
		case qcode.OrderAsc:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC`)
		case qcode.OrderDesc:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC`)
		case qcode.OrderAscNullsFirst:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC NULLS FIRST`)
		case qcode.OrderDescNullsFirst:
			//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC NULLS FIRST`)
		case qcode.OrderAscNullsLast:
			//fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC NULLS LAST`)
		case qcode.OrderDescNullsLast:
			//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC NULLS LAST`)
		default:
			return fmt.Errorf("13: unexpected value %v", ob.Order)
		}
	}
	return nil
}

func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
	io.WriteString(c.w, `DISTINCT ON (`)
	for i := range sel.DistinctOn {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		//fmt.Fprintf(w, `"%s_%d.ob.%s"`, c.sel.Table, c.sel.ID, c.sel.DistinctOn[i])
		tableIDColSuffix(c.w, ti.Name, sel.ID, sel.DistinctOn[i], "_ob")
	}
	c.w.WriteString(`) `)
}

func (c *compilerContext) renderList(ex *qcode.Exp) {
	io.WriteString(c.w, ` (`)
	for i := range ex.ListVal {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		switch ex.ListType {
		case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
			c.w.WriteString(ex.ListVal[i])
		case qcode.ValStr:
			c.w.WriteString(`'`)
			c.w.WriteString(ex.ListVal[i])
			c.w.WriteString(`'`)
		}
	}
	c.w.WriteString(`)`)
}

func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string) {
	io.WriteString(c.w, ` `)

	switch ex.Type {
	case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
		if len(ex.Val) != 0 {
			c.w.WriteString(ex.Val)
		} else {
			c.w.WriteString(`''`)
		}

	case qcode.ValStr:
		c.w.WriteString(`'`)
		c.w.WriteString(ex.Val)
		c.w.WriteString(`'`)

	case qcode.ValVar:
		if val, ok := vars[ex.Val]; ok {
			c.w.WriteString(val)
		} else {
			//fmt.Fprintf(w, `'{{%s}}'`, ex.Val)
			c.w.WriteString(`{{`)
			c.w.WriteString(ex.Val)
			c.w.WriteString(`}}`)
		}
	}
	//c.w.WriteString(`)`)
}

func funcPrefixLen(fn string) int {
	switch {
	case strings.HasPrefix(fn, "avg_"):
		return 4
	case strings.HasPrefix(fn, "count_"):
		return 6
	case strings.HasPrefix(fn, "max_"):
		return 4
	case strings.HasPrefix(fn, "min_"):
		return 4
	case strings.HasPrefix(fn, "sum_"):
		return 4
	// Check the longer stddev_* prefixes first; otherwise names like
	// "stddev_pop_price" would match the shorter "stddev_" case and
	// return the wrong length.
	case strings.HasPrefix(fn, "stddev_pop_"):
		return 11
	case strings.HasPrefix(fn, "stddev_samp_"):
		return 12
	case strings.HasPrefix(fn, "stddev_"):
		return 7
	case strings.HasPrefix(fn, "variance_"):
		return 9
	case strings.HasPrefix(fn, "var_pop_"):
		return 8
	case strings.HasPrefix(fn, "var_samp_"):
		return 9
	}
	return 0
}
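
A quick usage sketch (mine, not part of the diff): the returned length includes the trailing underscore, which presumably lets the compiler split an aggregate field such as the tests' max_price into its function and column parts.

	fn := "max_price"
	if n := funcPrefixLen(fn); n != 0 {
		agg, col := fn[:n-1], fn[n:] // "max", "price"
		fmt.Printf("%s(%s)\n", agg, col) // max(price)
	}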

func hasBit(n uint32, pos uint32) bool {
	val := n & (1 << pos)
	return (val > 0)
}
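
A brief gloss (mine, not from the diff): hasBit treats n as a bitset and reports whether bit pos is set.

	hasBit(0b0101, 0) // true
	hasBit(0b0101, 1) // false
	hasBit(0b0101, 2) // true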

func alias(w *bytes.Buffer, alias string) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func aliasWithID(w *bytes.Buffer, alias string, id int32) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"`)
}

func aliasWithIDSuffix(w *bytes.Buffer, alias string, id int32, suffix string) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(suffix)
	w.WriteString(`"`)
}

func colWithAlias(w *bytes.Buffer, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func tableWithAlias(w *bytes.Buffer, table, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func colWithTable(w *bytes.Buffer, table, col string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`"`)
}

func colWithTableID(w *bytes.Buffer, table string, id int32, col string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`"`)
}

func colWithTableIDAlias(w *bytes.Buffer, table string, id int32, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func colWithTableIDSuffixAlias(w *bytes.Buffer, table string, id int32,
	suffix, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(suffix)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func tableIDColSuffix(w *bytes.Buffer, table string, id int32, col, suffix string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`_`)
	w.WriteString(col)
	w.WriteString(suffix)
	w.WriteString(`"`)
}
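
To make the naming scheme concrete, a worked example (mine, derived from the writers above; the outputs match the identifiers that appear in the expected SQL of the tests below):

	w := &bytes.Buffer{}
	colWithTableID(w, "products", 0, "id")
	fmt.Println(w.String()) // "products_0"."id"

	w.Reset()
	tableIDColSuffix(w, "products", 0, "price", "_ob")
	fmt.Println(w.String()) // "products_0_price_ob"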

const charset = "0123456789"

func int2string(w *bytes.Buffer, val int32) {
	if val < 10 {
		w.WriteByte(charset[val])
		return
	}

	// Write the digits most-significant first via a small scratch
	// buffer. (Reversing the number and then printing the reversal,
	// as an earlier version of this loop did, drops trailing zeros:
	// 10 would come out as "1".)
	var buf [10]byte
	i := len(buf)
	for val > 0 {
		i--
		buf[i] = charset[val%10]
		val /= 10
	}
	w.Write(buf[i:])
}

func relID(h *xxhash.Digest, child, parent string) uint64 {
	h.WriteString(child)
	h.WriteString(parent)
	v := h.Sum64()
	h.Reset()
	return v
}
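
One way the hash is presumably consumed (my sketch; the cache map and its name are assumptions, not shown in this diff): it keys a child/parent pair without allocating a combined string key, and resetting the digest lets one hasher be reused across lookups.

	h := xxhash.New()
	relCache := make(map[uint64]*DBRel) // hypothetical cache

	id := relID(h, "products", "users")
	if rel, ok := relCache[id]; ok {
		_ = rel // cached relationship, no string concatenation needed
	}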
@@ -1,561 +0,0 @@
package psql

import (
	"bytes"
	"log"
	"os"
	"testing"

	"github.com/dosco/super-graph/qcode"
)

const (
	errNotExpected = "Generated SQL did not match what was expected"
)

var (
	qcompile *qcode.Compiler
	pcompile *Compiler
)

func TestMain(m *testing.M) {
	var err error

	qcompile, err = qcode.NewCompiler(qcode.Config{
		DefaultFilter: []string{
			`{ user_id: { _eq: $user_id } }`,
		},
		FilterMap: qcode.Filters{
			All: map[string][]string{
				"users": []string{
					"{ id: { eq: $user_id } }",
				},
				"products": []string{
					"{ price: { gt: 0 } }",
					"{ price: { lt: 8 } }",
				},
				"customers": []string{},
				"mes": []string{
					"{ id: { eq: $user_id } }",
				},
			},
			Query: map[string][]string{
				"users": []string{},
			},
			Update: map[string][]string{
				"products": []string{
					"{ user_id: { eq: $user_id } }",
				},
			},
		},
		Blocklist: []string{
			"secret",
			"password",
			"token",
		},
	})

	if err != nil {
		log.Fatal(err)
	}

	tables := []*DBTable{
		&DBTable{Name: "customers", Type: "table"},
		&DBTable{Name: "users", Type: "table"},
		&DBTable{Name: "products", Type: "table"},
		&DBTable{Name: "purchases", Type: "table"},
	}

	columns := [][]*DBColumn{
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
			&DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
			&DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
			&DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
	}

	schema := &DBSchema{
		t:  make(map[string]*DBTableInfo),
		rm: make(map[string]map[string]*DBRel),
		al: make(map[string]struct{}),
	}

	aliases := map[string][]string{
		"users": []string{"mes"},
	}

	for i, t := range tables {
		schema.updateSchema(t, columns[i], aliases)
	}

	vars := NewVariables(map[string]string{
		"account_id": "select account_id from users where id = $user_id",
	})

	pcompile = NewCompiler(Config{
		Schema: schema,
		Vars:   vars,
	})

	os.Exit(m.Run())
}

func compileGQLToPSQL(gql string, vars Variables) ([]byte, error) {
	qc, err := qcompile.Compile([]byte(gql))
	if err != nil {
		return nil, err
	}

	_, sqlStmt, err := pcompile.CompileEx(qc, vars)
	if err != nil {
		return nil, err
	}

	return sqlStmt, nil
}

func withComplexArgs(t *testing.T) {
	gql := `query {
		proDUcts(
			# returns only 30 items
			limit: 30,

			# starts from item 10, commented out for now
			# offset: 10,

			# orders the response items by highest price
			order_by: { price: desc },

			# no duplicate prices returned
			distinct: [ price ]

			# only items with an id >= 20 and < 28 are returned
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
			id
			NAME
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0" ORDER BY "products_0_price_ob" DESC), '[]') AS "products" FROM (SELECT DISTINCT ON ("products_0_price_ob") row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0", "products_0"."price" AS "products_0_price_ob" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") < 28) AND (("products"."id") >= 20)) LIMIT ('30') :: integer) AS "products_0" ORDER BY "products_0_price_ob" DESC LIMIT ('30') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereMultiOr(t *testing.T) {
	gql := `query {
		products(
			where: {
				or: {
					not: { id: { is_null: true } },
					price: { gt: 10 },
					price: { lt: 20 }
				} }
		) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") < 20) OR (("products"."price") > 10) OR NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereIsNull(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: {
					not: { id: { is_null: true } },
					price: { gt: 10 }
				}}) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereAndList(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: [
					{ not: { id: { is_null: true } } },
					{ price: { gt: 10 } },
				] } ) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func fetchByID(t *testing.T) {
	gql := `query {
		product(id: 15) {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 15)) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func searchQuery(t *testing.T) {
	gql := `query {
		products(search: "Imperial") {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("tsv") @@ to_tsquery('Imperial'))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func oneToMany(t *testing.T) {
	gql := `query {
		users {
			email
			products {
				name
				price
			}
		}
	}`

	sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func belongsTo(t *testing.T) {
	gql := `query {
		products {
			name
			price
			users {
				email
			}
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "users_1_join"."users" AS "users") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "users_1"."email" AS "email") AS "sel_1")) AS "sel_json_1" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "users_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func manyToMany(t *testing.T) {
	gql := `query {
		products {
			name
			customers {
				email
				full_name
			}
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "customers_1_join"."customers" AS "customers") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "customers_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func manyToManyReverse(t *testing.T) {
	gql := `query {
		customers {
			email
			full_name
			products {
				name
			}
		}
	}`

	sql := `SELECT json_object_agg('customers', customers) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func aggFunction(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func aggFunctionWithFilter(t *testing.T) {
	gql := `query {
		products(where: { id: { gt: 10 } }) {
			id
			max_price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") > 10)) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func queryWithVariables(t *testing.T) {
	gql := `query {
		product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") = {{product_price}}) AND (("products"."id") = {{product_id}})) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func syntheticTables(t *testing.T) {
	gql := `query {
		me {
			email
		}
	}`

	sql := `SELECT json_object_agg('me', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = {{user_id}})) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func TestCompileSelect(t *testing.T) {
	t.Run("withComplexArgs", withComplexArgs)
	t.Run("withWhereAndList", withWhereAndList)
	t.Run("withWhereIsNull", withWhereIsNull)
	t.Run("withWhereMultiOr", withWhereMultiOr)
	t.Run("fetchByID", fetchByID)
	t.Run("searchQuery", searchQuery)
	t.Run("belongsTo", belongsTo)
	t.Run("oneToMany", oneToMany)
	t.Run("manyToMany", manyToMany)
	t.Run("manyToManyReverse", manyToManyReverse)
	t.Run("aggFunction", aggFunction)
	t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
	t.Run("syntheticTables", syntheticTables)
	t.Run("queryWithVariables", queryWithVariables)
}

var benchGQL = []byte(`query {
	proDUcts(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		NAME
		price
		user {
			full_name
			picture : avatar
		}
	}
}`)

func BenchmarkCompile(b *testing.B) {
	w := &bytes.Buffer{}

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		w.Reset()

		qc, err := qcompile.Compile(benchGQL)
		if err != nil {
			b.Fatal(err)
		}

		_, err = pcompile.Compile(qc, w, nil)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkCompileParallel(b *testing.B) {
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		w := &bytes.Buffer{}

		for pb.Next() {
			w.Reset()

			qc, err := qcompile.Compile(benchGQL)
			if err != nil {
				b.Fatal(err)
			}

			_, err = pcompile.Compile(qc, w, nil)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
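
To run these benchmarks locally, something along these lines should work (-bench and -benchmem are standard go test flags; the ./psql package path is inferred from the file paths in this diff):

	go test -bench=. -benchmem ./psql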
@@ -1,47 +0,0 @@
package psql

type Stack struct {
	stA [20]int32
	st  []int32
	top int
}

// Create a new Stack
func NewStack() *Stack {
	s := &Stack{top: -1}
	s.st = s.stA[:0]
	return s
}

// Return the number of items in the Stack
func (s *Stack) Len() int {
	return (s.top + 1)
}

// View the top item on the Stack
func (s *Stack) Peek() int32 {
	if s.top == -1 {
		return -1
	}
	return s.st[s.top]
}

// Pop the top item of the Stack and return it
func (s *Stack) Pop() int32 {
	if s.top == -1 {
		return -1
	}

	s.top--
	return s.st[(s.top + 1)]
}

// Push a value onto the top of the Stack
func (s *Stack) Push(value int32) {
	s.top++
	if len(s.st) <= s.top {
		s.st = append(s.st, value)
	} else {
		s.st[s.top] = value
	}
}
47 psql/stack_int.go Normal file
@@ -0,0 +1,47 @@
package psql

type IntStack struct {
	stA [20]int32
	st  []int32
	top int
}

// Create a new IntStack
func NewIntStack() *IntStack {
	s := &IntStack{top: -1}
	s.st = s.stA[:0]
	return s
}

// Return the number of items in the IntStack
func (s *IntStack) Len() int {
	return (s.top + 1)
}

// View the top item on the IntStack
func (s *IntStack) Peek() int32 {
	if s.top == -1 {
		return -1
	}
	return s.st[s.top]
}

// Pop the top item of the IntStack and return it
func (s *IntStack) Pop() int32 {
	if s.top == -1 {
		return -1
	}

	s.top--
	return s.st[(s.top + 1)]
}

// Push a value onto the top of the IntStack
func (s *IntStack) Push(value int32) {
	s.top++
	if len(s.st) <= s.top {
		s.st = append(s.st, value)
	} else {
		s.st[s.top] = value
	}
}
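
A minimal usage sketch (mine, not from the diff). Because NewIntStack points s.st at the inline stA array, the first 20 pushes reuse that storage and never allocate:

	s := NewIntStack()
	s.Push(1)
	s.Push(2)
	fmt.Println(s.Peek()) // 2
	fmt.Println(s.Pop())  // 2
	fmt.Println(s.Len())  // 1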

28 psql/strings.go Normal file
@@ -0,0 +1,28 @@
package psql

import "fmt"

func (rt RelType) String() string {
	switch rt {
	case RelOneToOne:
		return "one to one"
	case RelOneToMany:
		return "one to many"
	case RelOneToManyThrough:
		return "one to many through"
	case RelRemote:
		return "remote"
	case RelEmbedded:
		return "embedded"
	}
	return ""
}

func (re *DBRel) String() string {
	if re.Type == RelOneToManyThrough {
		return fmt.Sprintf("'%s.%s' --(Through: %s)--> '%s.%s'",
			re.Left.Table, re.Left.Col, re.Through, re.Right.Table, re.Right.Col)
	}
	return fmt.Sprintf("'%s.%s' --(%s)--> '%s.%s'",
		re.Left.Table, re.Left.Col, re.Type, re.Right.Table, re.Right.Col)
}
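
For a sense of the output (my example, composed from the format strings above and the purchases join table defined in psql/test_schema.go below; the exact Left/Right assignment is an assumption), a through relationship would print roughly:

	'customers.id' --(Through: purchases)--> 'products.id'

and a plain foreign-key relationship something like:

	'products.user_id' --(one to many)--> 'users.id'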

383 psql/tables.go
@@ -3,19 +3,92 @@ package psql
import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/gobuffalo/flect"
	"github.com/jackc/pgtype"
	"github.com/jackc/pgx/v4/pgxpool"
)

type DBInfo struct {
	Version int
	Tables  []DBTable
	Columns [][]DBColumn
	colmap  map[string]map[string]*DBColumn
}

func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
	di := &DBInfo{}

	dbc, err := db.Acquire(context.Background())
	if err != nil {
		return nil, fmt.Errorf("error acquiring connection from pool: %w", err)
	}
	defer dbc.Release()

	var version string

	err = dbc.QueryRow(context.Background(), `SHOW server_version_num`).Scan(&version)
	if err != nil {
		return nil, fmt.Errorf("error fetching version: %w", err)
	}

	di.Version, err = strconv.Atoi(version)
	if err != nil {
		return nil, err
	}

	di.Tables, err = GetTables(dbc)
	if err != nil {
		return nil, err
	}

	di.colmap = make(map[string]map[string]*DBColumn, len(di.Tables))

	for i, t := range di.Tables {
		cols, err := GetColumns(dbc, "public", t.Name)
		if err != nil {
			return nil, err
		}

		di.Columns = append(di.Columns, cols)
		di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))

		for n, c := range di.Columns[i] {
			di.colmap[t.Key][c.Key] = &di.Columns[i][n]
		}
	}

	return di, nil
}
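
A minimal usage sketch (mine; the connection string is a placeholder and only GetDBInfo and GetColumn come from the diff):

	pool, err := pgxpool.Connect(context.Background(), "postgres://localhost:5432/app_development")
	if err != nil {
		log.Fatal(err)
	}
	di, err := GetDBInfo(pool)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(di.Version) // e.g. 110000 for PostgreSQL 11
	col, ok := di.GetColumn("products", "price")
	fmt.Println(col, ok)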

func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
	t.ID = di.Tables[len(di.Tables)-1].ID

	di.Tables = append(di.Tables, t)
	di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))

	for i := range cols {
		cols[i].ID = int16(i)
		c := &cols[i]
		di.colmap[t.Key][c.Key] = c
	}
	di.Columns = append(di.Columns, cols)
}

func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
	v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
	return v, ok
}

type DBTable struct {
	ID   int
	Name string
	Key  string
	Type string
}

func GetTables(dbc *pgxpool.Conn) ([]*DBTable, error) {
func GetTables(dbc *pgxpool.Conn) ([]DBTable, error) {
	sqlStmt := `
SELECT
	c.relname as "name",
@@ -32,7 +105,7 @@ WHERE c.relkind IN ('r','v','m','f','')
AND n.nspname !~ ('^pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid);`

	var tables []*DBTable
	var tables []DBTable

	rows, err := dbc.Query(context.Background(), sqlStmt)
	if err != nil {
@@ -40,13 +113,16 @@ AND pg_catalog.pg_table_is_visible(c.oid);`
	}
	defer rows.Close()

	for rows.Next() {
		t := DBTable{}
	for i := 0; rows.Next(); i++ {
		t := DBTable{ID: i}
		err = rows.Scan(&t.Name, &t.Type)
		if err != nil {
			return nil, err
		}
		tables = append(tables, &t)
		t.Key = strings.ToLower(t.Name)
		if t.Key != "schema_migrations" && t.Key != "ar_internal_metadata" {
			tables = append(tables, t)
		}
	}

	return tables, nil
@@ -55,7 +131,9 @@ AND pg_catalog.pg_table_is_visible(c.oid);`
type DBColumn struct {
	ID         int16
	Name       string
	Key        string
	Type       string
	Array      bool
	NotNull    bool
	PrimaryKey bool
	UniqueKey  bool
@@ -64,13 +142,18 @@ type DBColumn struct {
	fKeyColID pgtype.Int2Array
}

func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]*DBColumn, error) {
func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]DBColumn, error) {
	sqlStmt := `
SELECT
	f.attnum AS id,
	f.attname AS name,
	f.attnotnull AS notnull,
	pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
	CASE
		WHEN f.attndims != 0 THEN true
		WHEN right(pg_catalog.format_type(f.atttypid,f.atttypmod), 2) = '[]' THEN true
		ELSE false
	END AS array,
	CASE
		WHEN p.contype = ('p'::char) THEN true
		ELSE false
@@ -84,7 +167,7 @@ SELECT
		ELSE ''::text
	END AS foreignkey,
	CASE
		WHEN p.contype = ('f'::char) THEN p.confkey
		WHEN p.contype = ('f'::char) THEN p.confkey::int2[]
		ELSE ARRAY[]::int2[]
	END AS foreignkey_fieldnum
FROM pg_attribute f
@@ -93,7 +176,7 @@ FROM pg_attribute f
	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
	LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
	LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind = ('r'::char)
WHERE c.relkind IN ('r', 'v', 'm', 'f')
AND n.nspname = $1 -- Replace with Schema name
AND c.relname = $2 -- Replace with table name
AND f.attnum > 0
@@ -106,12 +189,12 @@ ORDER BY id;`
	}
	defer rows.Close()

	cmap := make(map[int16]*DBColumn)
	cmap := make(map[int16]DBColumn)

	for rows.Next() {
		c := DBColumn{}
		err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.PrimaryKey, &c.UniqueKey,
			&c.FKeyTable, &c.fKeyColID)

		err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.Array, &c.PrimaryKey, &c.UniqueKey, &c.FKeyTable, &c.fKeyColID)
		if err != nil {
			return nil, err
		}
@@ -119,6 +202,7 @@ ORDER BY id;`
		if v, ok := cmap[c.ID]; ok {
			if c.PrimaryKey {
				v.PrimaryKey = true
				v.UniqueKey = true
			}
			if c.NotNull {
				v.NotNull = true
@@ -126,260 +210,47 @@ ORDER BY id;`
			if c.UniqueKey {
				v.UniqueKey = true
			}
			if c.Array {
				v.Array = true
			}
			if len(c.FKeyTable) != 0 {
				v.FKeyTable = c.FKeyTable
			}
			if c.fKeyColID.Elements != nil {
				v.fKeyColID = c.fKeyColID
				err := v.fKeyColID.AssignTo(&v.FKeyColID)
				if err != nil {
					return nil, err
				}
			}
			cmap[c.ID] = v
		} else {
			err := c.fKeyColID.AssignTo(&c.FKeyColID)
			if err != nil {
				return nil, err
			}
			cmap[c.ID] = &c
			c.Key = strings.ToLower(c.Name)
			if c.PrimaryKey {
				c.UniqueKey = true
			}
			cmap[c.ID] = c
		}
	}

	cols := make([]*DBColumn, 0, len(cmap))
	for _, v := range cmap {
		cols = append(cols, v)
	cols := make([]DBColumn, 0, len(cmap))
	for i := range cmap {
		cols = append(cols, cmap[i])
	}

	return cols, nil
}

type DBSchema struct {
	t  map[string]*DBTableInfo
	rm map[string]map[string]*DBRel
	al map[string]struct{}
}

type DBTableInfo struct {
	Name        string
	Singular    bool
	PrimaryCol  string
	TSVCol      string
	Columns     map[string]*DBColumn
	ColumnNames []string
}

type RelType int

const (
	RelBelongTo RelType = iota + 1
	RelOneToMany
	RelOneToManyThrough
	RelRemote
)

type DBRel struct {
	Type    RelType
	Through string
	ColT    string
	Col1    string
	Col2    string
}

func NewDBSchema(db *pgxpool.Pool, aliases map[string][]string) (*DBSchema, error) {
	schema := &DBSchema{
		t:  make(map[string]*DBTableInfo),
		rm: make(map[string]map[string]*DBRel),
		al: make(map[string]struct{}),
	}

	dbc, err := db.Acquire(context.Background())
	if err != nil {
		return nil, fmt.Errorf("error acquiring connection from pool")
	}
	defer dbc.Release()

	tables, err := GetTables(dbc)
	if err != nil {
		return nil, err
	}

	for _, t := range tables {
		cols, err := GetColumns(dbc, "public", t.Name)
		if err != nil {
			return nil, err
		}

		schema.updateSchema(t, cols, aliases)
	}

	return schema, nil
}

func (s *DBSchema) updateSchema(
	t *DBTable,
	cols []*DBColumn,
	aliases map[string][]string) {

	// Foreign key columns in current table
	colByID := make(map[int16]*DBColumn)
	columns := make(map[string]*DBColumn, len(cols))
	colNames := make([]string, 0, len(cols))

	for i := range cols {
		c := cols[i]
		name := strings.ToLower(c.Name)
		columns[name] = c
		colNames = append(colNames, name)
		colByID[c.ID] = c
	}

	singular := strings.ToLower(flect.Singularize(t.Name))
	s.t[singular] = &DBTableInfo{
		Name:        t.Name,
		Singular:    true,
		Columns:     columns,
		ColumnNames: colNames,
	}

	plural := strings.ToLower(flect.Pluralize(t.Name))
	s.t[plural] = &DBTableInfo{
		Name:        t.Name,
		Singular:    false,
		Columns:     columns,
		ColumnNames: colNames,
	}

	ct := strings.ToLower(t.Name)

	if al, ok := aliases[ct]; ok {
		for i := range al {
			k1 := flect.Singularize(al[i])
			s.t[k1] = s.t[singular]

			k2 := flect.Pluralize(al[i])
			s.t[k2] = s.t[plural]

			s.al[k1] = struct{}{}
			s.al[k2] = struct{}{}
		}
	}

	jcols := make([]*DBColumn, 0, len(cols))

	for _, c := range cols {
		switch {
		case c.Type == "tsvector":
			s.t[singular].TSVCol = c.Name
			s.t[plural].TSVCol = c.Name

		case c.PrimaryKey:
			s.t[singular].PrimaryCol = c.Name
			s.t[plural].PrimaryCol = c.Name

		case len(c.FKeyTable) != 0:
			if len(c.FKeyColID) == 0 {
				continue
			}

			// Foreign key column name
			ft := strings.ToLower(c.FKeyTable)
			fc, ok := colByID[c.FKeyColID[0]]
			if !ok {
				continue
			}

			// Belongs-to relation between the current table and the
			// table in the foreign key
			rel1 := &DBRel{RelBelongTo, "", "", c.Name, fc.Name}
			s.SetRel(ct, ft, rel1)

			// One-to-many relation between the foreign key table and
			// the current table
			rel2 := &DBRel{RelOneToMany, "", "", fc.Name, c.Name}
			s.SetRel(ft, ct, rel2)

			jcols = append(jcols, c)
		}
	}

	// If a table contains multiple foreign key columns it's a possible
	// join table for many-to-many relationships or multiple one-to-many
	// relations.

	// The one-to-many relations below use the current table as the
	// join table, aka the through table.
	if len(jcols) > 1 {
		for i := range jcols {
			for n := range jcols {
				if n != i {
					s.updateSchemaOTMT(ct, jcols[i], jcols[n], colByID)
				}
			}
		}
	}
}
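
To make the join-table pass concrete, a worked example (mine, using the purchases fixture that appears in psql/test_schema.go later in this diff): purchases has two foreign key columns, customer_id referencing customers and product_id referencing products, so the nested loop above fires twice and registers a one-to-many-through relation in each direction. This is exactly the shape the manyToMany and manyToManyReverse tests exercise.

	// Illustrative only (not in the diff); customerIDCol and
	// productIDCol stand for the two *DBColumn foreign keys:
	s.updateSchemaOTMT("purchases", customerIDCol, productIDCol, colByID)
	s.updateSchemaOTMT("purchases", productIDCol, customerIDCol, colByID)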

func (s *DBSchema) updateSchemaOTMT(
	ct string,
	col1, col2 *DBColumn,
	colByID map[int16]*DBColumn) {

	t1 := strings.ToLower(col1.FKeyTable)
	t2 := strings.ToLower(col2.FKeyTable)

	fc1, ok := colByID[col1.FKeyColID[0]]
	if !ok {
		return
	}
	fc2, ok := colByID[col2.FKeyColID[0]]
	if !ok {
		return
	}

	// One-to-many-through relation between the 1st foreign key table
	// and the 2nd foreign key table
	//rel1 := &DBRel{RelOneToManyThrough, ct, fc1.Name, col1.Name}
	rel1 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name, col1.Name}
	s.SetRel(t1, t2, rel1)

	// One-to-many-through relation between the 2nd foreign key table
	// and the 1st foreign key table
	//rel2 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name}
	rel2 := &DBRel{RelOneToManyThrough, ct, col1.Name, fc1.Name, col2.Name}
	s.SetRel(t2, t1, rel2)
}

func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
	t, ok := s.t[table]
	if !ok {
		return nil, fmt.Errorf("unknown table '%s'", table)
	}
	return t, nil
}

func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
	sc := strings.ToLower(flect.Singularize(child))
	pc := strings.ToLower(flect.Pluralize(child))

	if _, ok := s.rm[sc]; !ok {
		s.rm[sc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[pc]; !ok {
		s.rm[pc] = make(map[string]*DBRel)
	}

	sp := strings.ToLower(flect.Singularize(parent))
	pp := strings.ToLower(flect.Pluralize(parent))

	s.rm[sc][sp] = rel
	s.rm[sc][pp] = rel
	s.rm[pc][sp] = rel
	s.rm[pc][pp] = rel

	return nil
}

func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
	rel, ok := s.rm[child][parent]
	if !ok {
		return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
			child, parent)
	}
	return rel, nil
}

func (s *DBSchema) IsAlias(name string) bool {
	_, ok := s.al[name]
	return ok
}
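
A small sketch (mine; schema is assumed to be a populated *DBSchema) of what the four singular/plural registrations in SetRel buy: a later lookup succeeds with whichever inflection the GraphQL query happened to use.

	rel1, _ := schema.GetRel("products", "users") // plural / plural
	rel2, _ := schema.GetRel("product", "user")   // singular / singular
	// rel1 == rel2: all four key combinations point at the same *DBRel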

// func GetValType(type string) qcode.ValType {
// 	switch {
// 	case "bigint", "integer", "smallint", "numeric", "bigserial":
// 		return qcode.ValInt
// 	case "double precision", "real":
// 		return qcode.ValFloat
// 	case ""
// 	}
// }
109 psql/test_schema.go Normal file
@@ -0,0 +1,109 @@
package psql
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getTestSchema() *DBSchema {
|
||||
tables := []DBTable{
|
||||
DBTable{Name: "customers", Type: "table"},
|
||||
DBTable{Name: "users", Type: "table"},
|
||||
DBTable{Name: "products", Type: "table"},
|
||||
DBTable{Name: "purchases", Type: "table"},
|
||||
DBTable{Name: "tags", Type: "table"},
|
||||
DBTable{Name: "tag_count", Type: "json"},
|
||||
}
|
||||
|
||||
columns := [][]DBColumn{
|
||||
[]DBColumn{
|
||||
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
|
||||
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
|
||||
[]DBColumn{
|
||||
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
|
||||
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
|
||||
[]DBColumn{
|
||||
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
|
||||
DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
|
||||
DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
|
||||
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
|
||||
DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
|
||||
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
			DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
			DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false}},
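		// tags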
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
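		// tag_count (backs the products.tag_count json column)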
		[]DBColumn{
			DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
			DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
	}
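	// lookups are case-insensitive: key tables and columns by lowercase name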
	for i := range tables {
		tables[i].Key = strings.ToLower(tables[i].Name)
		for n := range columns[i] {
			columns[i][n].Key = strings.ToLower(columns[i][n].Name)
		}
	}

	schema := &DBSchema{
		ver: 110000,
		t:   make(map[string]*DBTableInfo),
		rm:  make(map[string]map[string]*DBRel),
	}

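	// aliases let a table be queried under additional names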
	aliases := map[string][]string{
		"users": []string{"mes"},
	}

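	// register every table and its columns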
	for i, t := range tables {
		err := schema.addTable(t, columns[i], aliases)
		if err != nil {
			log.Fatal(err)
		}
	}

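	// resolve direct foreign-key relationships first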
	for i, t := range tables {
		err := schema.firstDegreeRels(t, columns[i])
		if err != nil {
			log.Fatal(err)
		}
	}

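	// then relationships that span an intermediate table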
	for i, t := range tables {
		err := schema.secondDegreeRels(t, columns[i])
		if err != nil {
			log.Fatal(err)
		}
	}

	return schema
}
151
psql/tests.sql
Normal file
@ -0,0 +1,151 @@
=== RUN TestCompileInsert
=== RUN TestCompileInsert/simpleInsert
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/singleInsert
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT "t"."name", "t"."description", "t"."price", "t"."user_id" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/bulkInsert
WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/simpleInsertWithPresets
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, '{{user_id}}' :: bigint FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertManyToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT json_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT json_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j->'product') t RETURNING *) SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOne
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToManyWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user", "__sj_2"."json" AS "tags" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnectArray
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileInsert (0.02s)
--- PASS: TestCompileInsert/simpleInsert (0.00s)
--- PASS: TestCompileInsert/singleInsert (0.00s)
--- PASS: TestCompileInsert/bulkInsert (0.00s)
--- PASS: TestCompileInsert/simpleInsertWithPresets (0.00s)
--- PASS: TestCompileInsert/nestedInsertManyToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOne (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToManyWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnectArray (0.00s)
=== RUN TestCompileMutate
=== RUN TestCompileMutate/singleUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/singleUpsertWhere
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > '3' :: numeric(7,2)) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/bulkUpsert
WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/delete
WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '1' :: bigint)) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileMutate (0.00s)
--- PASS: TestCompileMutate/singleUpsert (0.00s)
--- PASS: TestCompileMutate/singleUpsertWhere (0.00s)
--- PASS: TestCompileMutate/bulkUpsert (0.00s)
--- PASS: TestCompileMutate/delete (0.00s)
=== RUN TestCompileQuery
=== RUN TestCompileQuery/withComplexArgs
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT DISTINCT ON ("products"."price") "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."id") < '28' :: bigint) AND (("products"."id") >= '20' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) ORDER BY "products"."price" DESC LIMIT ('30') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereAndList
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIsNull
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereMultiOr
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND ((("products"."price") < '20' :: numeric(7,2)) OR (("products"."price") > '10' :: numeric(7,2)) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/fetchByID
SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '{{id}}' :: bigint))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/searchQuery
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery('{{query}}')) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery('{{query}}')) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery('{{query}}'))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToMany
SELECT json_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email", "__sj_1"."json" AS "products" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyReverse
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "__sj_1"."json" AS "users" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."email" AS "email" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyArray
SELECT json_build_object('tags', "__sj_0"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "__sj_3"."json" AS "tags" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_3") AS "json"FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "tags_0"."name" AS "name", "__sj_1"."json" AS "product" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToMany
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "__sj_1"."json" AS "customers" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToManyReverse
SELECT json_build_object('customers', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "__sj_1"."json" AS "products" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunction
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionBlockedByCol
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionDisabled
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionWithFilter
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") > '10' :: bigint))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/syntheticTables
SELECT json_build_object('me', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/queryWithVariables
SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") = '{{product_price}}' :: numeric(7,2)) AND (("products"."id") = '{{product_id}}' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereOnRelations
SELECT json_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/multiRoot
SELECT json_build_object('customer', "__sj_0"."json", 'user', "__sj_1"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "__sj_3"."json" AS "customers", "__sj_4"."json" AS "customer" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_4") AS "json"FROM (SELECT "customers_4"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('1') :: integer) AS "customers_4") AS "__sr_4") AS "__sj_4" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_3") AS "json"FROM (SELECT "customers_3"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1", (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "customers_0"."id" AS "id" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/jsonColumnAsTable
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "tag_count" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "tag_count_1"."count" AS "count", "__sj_2"."json" AS "tags" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "tags_2"."name" AS "name" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withCursor
SELECT json_build_object('products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor" FROM (SELECT row_to_json("__sr_0") AS "json", "__cur_0", "__cur_1"FROM (SELECT "products_0"."name" AS "name", LAST_VALUE("products_0"."price") OVER() AS "__cur_0", LAST_VALUE("products_0"."id") OVER() AS "__cur_1" FROM (WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array('{{cursor}}', ',') as a) SELECT "products"."name", "products"."id", "products"."price" FROM "products", "__cur" WHERE (((("products"."price") < "__cur"."price" :: numeric(7,2)) OR ((("products"."price") = "__cur"."price" :: numeric(7,2)) AND (("products"."id") > "__cur"."id" :: bigint)))) ORDER BY "products"."price" DESC, "products"."id" ASC LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/nullForAuthRequiredInAnon
SELECT json_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", NULL AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedQuery
SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedFunctions
SELECT json_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(json_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."email" AS "email" FROM (SELECT , "users"."email" FROM "users" WHERE (false) GROUP BY "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
--- PASS: TestCompileQuery (0.02s)
--- PASS: TestCompileQuery/withComplexArgs (0.00s)
--- PASS: TestCompileQuery/withWhereAndList (0.00s)
--- PASS: TestCompileQuery/withWhereIsNull (0.00s)
--- PASS: TestCompileQuery/withWhereMultiOr (0.00s)
--- PASS: TestCompileQuery/fetchByID (0.00s)
--- PASS: TestCompileQuery/searchQuery (0.00s)
--- PASS: TestCompileQuery/oneToMany (0.00s)
--- PASS: TestCompileQuery/oneToManyReverse (0.00s)
--- PASS: TestCompileQuery/oneToManyArray (0.00s)
--- PASS: TestCompileQuery/manyToMany (0.00s)
--- PASS: TestCompileQuery/manyToManyReverse (0.00s)
--- PASS: TestCompileQuery/aggFunction (0.00s)
--- PASS: TestCompileQuery/aggFunctionBlockedByCol (0.00s)
--- PASS: TestCompileQuery/aggFunctionDisabled (0.00s)
--- PASS: TestCompileQuery/aggFunctionWithFilter (0.00s)
--- PASS: TestCompileQuery/syntheticTables (0.00s)
--- PASS: TestCompileQuery/queryWithVariables (0.00s)
--- PASS: TestCompileQuery/withWhereOnRelations (0.00s)
--- PASS: TestCompileQuery/multiRoot (0.00s)
--- PASS: TestCompileQuery/jsonColumnAsTable (0.00s)
--- PASS: TestCompileQuery/withCursor (0.00s)
--- PASS: TestCompileQuery/nullForAuthRequiredInAnon (0.00s)
--- PASS: TestCompileQuery/blockedQuery (0.00s)
--- PASS: TestCompileQuery/blockedFunctions (0.00s)
=== RUN TestCompileUpdate
=== RUN TestCompileUpdate/singleUpdate
WITH "_sg_input" AS (SELECT '{{update}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE ((("products"."id") = '1' :: bigint) AND (("products"."id") = '{{id}}' :: bigint)) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/simpleUpdateWithPresets
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") = '{{user_id}}' :: bigint) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateManyToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT json_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = '{{id}}' :: bigint) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT json_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_2") AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToMany
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '8' :: bigint) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOne
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToManyWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = '{{id}}' :: bigint) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT json_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithConnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{product_id}}' :: bigint) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json("__sr_1") AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithDisconnect
WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = '{{id}}' :: bigint) RETURNING "products".*) SELECT json_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT row_to_json("__sr_0") AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileUpdate (0.02s)
--- PASS: TestCompileUpdate/singleUpdate (0.00s)
--- PASS: TestCompileUpdate/simpleUpdateWithPresets (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateManyToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOne (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToManyWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithDisconnect (0.00s)
PASS
ok github.com/dosco/super-graph/psql 0.175s
237
psql/update.go
Normal file
@ -0,0 +1,237 @@
//nolint:errcheck
package psql

import (
	"errors"
	"fmt"
	"io"

	"github.com/dosco/super-graph/qcode"
	"github.com/dosco/super-graph/util"
)

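// renderUpdate writes the SQL for an update mutation: a "_sg_input" CTE
// wrapping the JSON variable, followed by one CTE per table touched by
// the nested update tree.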
func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
	vars Variables, ti *DBTableInfo) (uint32, error) {

	update, ok := vars[qc.ActionVar]
	if !ok {
		return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
	}
	if len(update) == 0 {
		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
	io.WriteString(c.w, qc.ActionVar)
	io.WriteString(c.w, `}}' :: json AS j)`)

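	// Walk the nested update payload with an explicit stack; kvitems are
	// expanded by handleKVItem and renitems are rendered as CTEs.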
	st := util.NewStack()
	st.Push(kvitem{_type: itemUpdate, key: ti.Name, val: update, ti: ti})

	for {
		if st.Len() == 0 {
			break
		}
		if update[0] == '[' && st.Len() > 1 {
			return 0, errors.New("Nested bulk update not supported")
		}
		intf := st.Pop()

		switch item := intf.(type) {
		case kvitem:
			if err := c.handleKVItem(st, item); err != nil {
				return 0, err
			}

		case renitem:
			var err error

			// if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
			// 	io.WriteString(c.w, ` WHERE false`)
			// }

			switch item._type {
			case itemUpdate:
				err = c.renderUpdateStmt(w, qc, item)
			case itemConnect:
				err = c.renderConnectStmt(qc, w, item)
			case itemDisconnect:
				err = c.renderDisconnectStmt(qc, w, item)
			case itemUnion:
				err = c.renderUnionStmt(w, item)
			}

			if err != nil {
				return 0, err
			}
		}
	}
	io.WriteString(c.w, ` `)

	return 0, nil
}

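// renderUpdateStmt writes a single
// UPDATE ... SET (...) = (SELECT ... FROM "_sg_input" ...) CTE for one table.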
func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item renitem) error {
	ti := item.ti
	jt := item.data
	sk := nestedUpdateRelColumnsMap(item.kvitem)

	io.WriteString(c.w, `, `)
	renderCteName(c.w, item.kvitem)
	io.WriteString(c.w, ` AS (`)

	io.WriteString(w, `UPDATE `)
	quoted(w, ti.Name)
	io.WriteString(w, ` SET (`)
	renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
	renderNestedUpdateRelColumns(w, item.kvitem, false)

	io.WriteString(w, `) = (SELECT `)
	renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
	renderNestedUpdateRelColumns(w, item.kvitem, true)

	io.WriteString(w, ` FROM "_sg_input" i, `)
	renderNestedUpdateRelTables(w, item.kvitem)

if item.array {
|
||||
io.WriteString(w, `json_populate_recordset`)
|
||||
} else {
|
||||
io.WriteString(w, `json_populate_record`)
|
||||
}
|
||||
|
||||
io.WriteString(w, `(NULL::`)
|
||||
io.WriteString(w, ti.Name)
|
||||
|
||||
if len(item.path) == 0 {
|
||||
io.WriteString(w, `, i.j) t)`)
|
||||
} else {
|
||||
io.WriteString(w, `, i.j->`)
|
||||
joinPath(w, item.path)
|
||||
io.WriteString(w, `) t) `)
|
||||
}
|
||||
|
||||
if item.id != 0 {
|
||||
// Render sql to set id values if child-to-parent
|
||||
// relationship is one-to-one
|
||||
rel := item.relCP
|
||||
|
||||
io.WriteString(w, `FROM `)
|
||||
quoted(w, rel.Right.Table)
|
||||
|
||||
io.WriteString(w, ` WHERE ((`)
|
||||
colWithTable(w, rel.Left.Table, rel.Left.Col)
|
||||
io.WriteString(w, `) = (`)
|
||||
colWithTable(w, rel.Right.Table, rel.Right.Col)
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
if item.relPC.Type == RelOneToMany {
|
||||
if conn, ok := item.data["where"]; ok {
|
||||
io.WriteString(w, ` AND `)
|
||||
renderWhereFromJSON(w, item.kvitem, "where", conn)
|
||||
} else if conn, ok := item.data["_where"]; ok {
|
||||
io.WriteString(w, ` AND `)
|
||||
renderWhereFromJSON(w, item.kvitem, "_where", conn)
|
||||
}
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
} else {
|
||||
io.WriteString(w, ` WHERE `)
|
||||
if err := c.renderWhere(&qc.Selects[0], ti); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(w, ` RETURNING `)
|
||||
quoted(w, ti.Name)
|
||||
io.WriteString(w, `.*)`)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func nestedUpdateRelColumnsMap(item kvitem) map[string]struct{} {
|
||||
sk := make(map[string]struct{}, len(item.items))
|
||||
|
||||
for _, v := range item.items {
|
||||
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
|
||||
sk[v.relCP.Right.Col] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return sk
|
||||
}
|
||||
|
||||
func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
|
||||
// Render child foreign key columns if child-to-parent
|
||||
// relationship is one-to-many
|
||||
for _, v := range item.items {
|
||||
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
|
||||
if values {
|
||||
// if v.relCP.Right.Array {
|
||||
// io.WriteString(w, `array_diff(`)
|
||||
// colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
|
||||
// io.WriteString(w, `, `)
|
||||
// }
|
||||
|
||||
if v._ctype > 0 {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `".`)
|
||||
quoted(w, v.relCP.Left.Col)
|
||||
} else {
|
||||
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
|
||||
}
|
||||
|
||||
// if v.relCP.Right.Array {
|
||||
// io.WriteString(w, `)`)
|
||||
// }
|
||||
} else {
|
||||
|
||||
quoted(w, v.relCP.Right.Col)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
|
||||
// Render tables needed to set values if child-to-parent
|
||||
// relationship is one-to-many
|
||||
for _, v := range item.items {
|
||||
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `", `)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderDelete(qc *qcode.QCode, w io.Writer,
|
||||
vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
root := &qc.Selects[0]
|
||||
|
||||
io.WriteString(c.w, `WITH `)
|
||||
quoted(c.w, ti.Name)
|
||||
|
||||
io.WriteString(c.w, ` AS (DELETE FROM `)
|
||||
quoted(c.w, ti.Name)
|
||||
io.WriteString(c.w, ` WHERE `)
|
||||
|
||||
if root.Where == nil {
|
||||
return 0, errors.New("'where' clause missing in delete mutation")
|
||||
}
|
||||
|
||||
if err := c.renderWhere(root, ti); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
io.WriteString(w, ` RETURNING `)
|
||||
quoted(w, ti.Name)
|
||||
io.WriteString(w, `.*) `)
|
||||
return 0, nil
|
||||
}
|
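One detail worth calling out in renderUpdateStmt: the choice between json_populate_record and json_populate_recordset is driven by item.array, mirroring the update[0] == '[' check in renderUpdate. A standalone sketch of that decision; populateFn is a hypothetical helper for illustration, not part of this package:

package main

import (
	"encoding/json"
	"fmt"
)

// populateFn picks the PostgreSQL function used to split the mutation
// payload into columns: the set-returning variant is needed when the
// payload is a JSON array (a bulk update).
func populateFn(update json.RawMessage) string {
	if len(update) > 0 && update[0] == '[' {
		return "json_populate_recordset"
	}
	return "json_populate_record"
}

func main() {
	fmt.Println(populateFn(json.RawMessage(`{"name": "Apple"}`)))   // json_populate_record
	fmt.Println(populateFn(json.RawMessage(`[{"name": "Apple"}]`))) // json_populate_recordset
}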
258 psql/update_test.go Normal file
@ -0,0 +1,258 @@
package psql

import (
	"encoding/json"
	"testing"
)

func singleUpdate(t *testing.T) {
	gql := `mutation {
		product(id: $id, update: $update, where: { id: { eq: 1 } }) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
	}

	compileGQLToPSQL(t, gql, vars, "anon")
}

func simpleUpdateWithPresets(t *testing.T) {
	gql := `mutation {
		product(update: $data) {
			id
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{"name": "Apple", "price": 1.25}`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func nestedUpdateManyToMany(t *testing.T) {
	gql := `mutation {
		purchase(update: $data, id: $id) {
			sale_type
			quantity
			due_date
			customer {
				id
				full_name
				email
			}
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(` {
			"sale_type": "bought",
			"quantity": 5,
			"due_date": "now",
			"customer": {
				"email": "thedude@rug.com",
				"full_name": "The Dude"
			},
			"product": {
				"name": "Apple",
				"price": 1.25
			}
		}
	`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToMany(t *testing.T) {
	gql := `mutation {
		user(update: $data, where: { id: { eq: 8 } }) {
			id
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"where": {
					"id": 2
				},
				"name": "Apple",
				"price": 1.25,
				"created_at": "now",
				"updated_at": "now"
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOne(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $id) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"created_at": "now",
			"updated_at": "now",
			"user": {
				"email": "thedude@rug.com"
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToManyWithConnect(t *testing.T) {
	gql := `mutation {
		user(update: $data, id: $id) {
			id
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"connect": { "id": 7 },
				"disconnect": { "id": 8 }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOneWithConnect(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $product_id) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"user": {
				"connect": { "id": 5, "email": "test@test.com" }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $id) {
			id
			name
			user_id
		}
	}`
	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"user": {
				"disconnect": { "id": 5 }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

// func nestedUpdateOneToOneWithDisconnectArray(t *testing.T) {
// 	gql := `mutation {
// 		product(update: $data, id: 2) {
// 			id
// 			name
// 			user_id
// 		}
// 	}`

// 	sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`

// 	vars := map[string]json.RawMessage{
// 		"data": json.RawMessage(`{
// 			"name": "Apple",
// 			"price": 1.25,
// 			"user": {
// 				"disconnect": { "id": 5 }
// 			}
// 		}`),
// 	}

// 	resSQL, err := compileGQLToPSQL(gql, vars, "admin")
// 	if err != nil {
// 		t.Fatal(err)
// 	}

// 	if string(resSQL) != sql {
// 		t.Fatal(errNotExpected)
// 	}
// }

func TestCompileUpdate(t *testing.T) {
	t.Run("singleUpdate", singleUpdate)
	t.Run("simpleUpdateWithPresets", simpleUpdateWithPresets)
	t.Run("nestedUpdateManyToMany", nestedUpdateManyToMany)
	t.Run("nestedUpdateOneToMany", nestedUpdateOneToMany)
	t.Run("nestedUpdateOneToOne", nestedUpdateOneToOne)
	t.Run("nestedUpdateOneToManyWithConnect", nestedUpdateOneToManyWithConnect)
	t.Run("nestedUpdateOneToOneWithConnect", nestedUpdateOneToOneWithConnect)
	t.Run("nestedUpdateOneToOneWithDisconnect", nestedUpdateOneToOneWithDisconnect)
	//t.Run("nestedUpdateOneToOneWithDisconnectArray", nestedUpdateOneToOneWithDisconnectArray)
}
2 qcode/cleanup.sh Executable file
@ -0,0 +1,2 @@
#!/bin/sh
cd corpus && rm -rf $(find . ! -name '00?.gql')
136 qcode/config.go Normal file
@ -0,0 +1,136 @@
package qcode

import (
	"regexp"
	"sort"
	"strings"
)

type Config struct {
	Blocklist []string
}

type QueryConfig struct {
	Limit            int
	Filters          []string
	Columns          []string
	DisableFunctions bool
}

type InsertConfig struct {
	Filters []string
	Columns []string
	Presets map[string]string
}

type UpdateConfig struct {
	Filters []string
	Columns []string
	Presets map[string]string
}

type DeleteConfig struct {
	Filters []string
	Columns []string
}

type TRConfig struct {
	Query  QueryConfig
	Insert InsertConfig
	Update UpdateConfig
	Delete DeleteConfig
}

type trval struct {
	query struct {
		limit   string
		fil     *Exp
		filNU   bool
		cols    map[string]struct{}
		disable struct {
			funcs bool
		}
	}

	insert struct {
		fil    *Exp
		filNU  bool
		cols   map[string]struct{}
		psmap  map[string]string
		pslist []string
	}

	update struct {
		fil    *Exp
		filNU  bool
		cols   map[string]struct{}
		psmap  map[string]string
		pslist []string
	}

	delete struct {
		fil   *Exp
		filNU bool
		cols  map[string]struct{}
	}
}

func (trv *trval) allowedColumns(qt QType) map[string]struct{} {
	switch qt {
	case QTQuery:
		return trv.query.cols
	case QTInsert:
		return trv.insert.cols
	case QTUpdate:
		return trv.update.cols
	case QTDelete:
		return trv.delete.cols
	case QTUpsert:
		return trv.insert.cols
	}

	return nil
}

func (trv *trval) filter(qt QType) (*Exp, bool) {
	switch qt {
	case QTQuery:
		return trv.query.fil, trv.query.filNU
	case QTInsert:
		return trv.insert.fil, trv.insert.filNU
	case QTUpdate:
		return trv.update.fil, trv.update.filNU
	case QTDelete:
		return trv.delete.fil, trv.delete.filNU
	case QTUpsert:
		return trv.insert.fil, trv.insert.filNU
	}

	return nil, false
}

func listToMap(list []string) map[string]struct{} {
	m := make(map[string]struct{}, len(list))
	for i := range list {
		m[strings.ToLower(list[i])] = struct{}{}
	}
	return m
}

func mapToList(m map[string]string) []string {
	list := []string{}
	for k := range m {
		list = append(list, strings.ToLower(k))
	}
	sort.Strings(list)
	return list
}

var varRe = regexp.MustCompile(`\$([a-zA-Z0-9_]+)`)

func parsePresets(m map[string]string) map[string]string {
	for k, v := range m {
		m[k] = varRe.ReplaceAllString(v, `{{$1}}`)
	}
	return m
}
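Since parsePresets is easy to miss, a standalone sketch of what it does: preset values written with $var are rewritten into the {{var}} placeholder form the SQL layer consumes (the same form as '{{data}}' :: json above). The preset map below is illustrative, not from the repo:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as varRe in config.go: a $ followed by word characters.
var varRe = regexp.MustCompile(`\$([a-zA-Z0-9_]+)`)

func main() {
	presets := map[string]string{
		"user_id":    "$user_id", // becomes a {{user_id}} placeholder
		"updated_at": "now",      // plain values pass through untouched
	}
	for k, v := range presets {
		presets[k] = varRe.ReplaceAllString(v, `{{$1}}`)
	}
	fmt.Println(presets["user_id"], presets["updated_at"]) // {{user_id}} now
}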
21 qcode/corpus/001.gql Normal file
@ -0,0 +1,21 @@
query {
	products(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# no duplicate prices returned
		distinct: [ price ]

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		name
		price
	}
}
14 qcode/corpus/002.gql Normal file
@ -0,0 +1,14 @@
query {
	products(
		where: {
			or: {
				not: { id: { is_null: true } },
				price: { gt: 10 },
				price: { lt: 20 }
			} }
	) {
		id
		name
		price
	}
}
12 qcode/corpus/003.gql Normal file
@ -0,0 +1,12 @@
query {
	products(
		where: {
			and: {
				not: { id: { is_null: true } },
				price: { gt: 10 }
			}}) {
		id
		name
		price
	}
}
@ -1,14 +1,20 @@
// +build gofuzz

package qcode

// FuzzerEntrypoint for Fuzzbuzz
func FuzzerEntrypoint(data []byte) int {
	//testData := string(data)
func Fuzz(data []byte) int {
	qt := GetQType(string(data))

	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile(data)
	if err != nil {
		return -1
	if qt > QTUpsert {
		panic("qt > QTUpsert")
	}

	return 0
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile(data, "user")
	if err != nil {
		return 0
	}

	return 1
}
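A note on the rewritten fuzzer: under the dvyukov/go-fuzz convention this entry point follows, the return value steers corpus growth: 1 marks the input as interesting and worth keeping, 0 is neutral, and -1 drops it. So returning 0 on a compile error and 1 on a successful compile, as above, is the idiomatic pattern, and the GetQType check is a cheap invariant the fuzzer can trip via panic.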
43 qcode/lex.go
@ -28,10 +28,10 @@ type Pos int

// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	end  Pos      // The ending position, in bytes, of this item in the input string.
	line uint16   // The line number at the start of this item.
	_type itemType // The type of this item.
	pos   Pos      // The starting position, in bytes, of this item in the input string.
	end   Pos      // The ending position, in bytes, of this item in the input string.
	line  int16    // The line number at the start of this item.
}

// itemType identifies the type of lex items.
@ -87,7 +87,7 @@ type lexer struct {
	width  Pos    // width of last rune read from input
	items  []item // array of scanned items
	itemsA [50]item
	line   uint16 // 1+number of newlines seen
	line   int16  // 1+number of newlines seen
	err    error
}

@ -137,7 +137,7 @@ func (l *lexer) emit(t itemType) {
	l.items = append(l.items, item{t, l.start, l.pos, l.line})
	// Some items contain text internally. If so, count their newlines.
	switch t {
	case itemName:
	case itemStringVal:
		for i := l.start; i < l.pos; i++ {
			if l.input[i] == '\n' {
				l.line++
@ -147,13 +147,14 @@ func (l *lexer) emit(t itemType) {
	l.start = l.pos
}

func (l *lexer) emitL(t itemType) {
	s, e := l.current()
	lowercase(l.input, s, e)
	l.emit(t)
}

// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
	for i := l.start; i < l.pos; i++ {
		if l.input[i] == '\n' {
			l.line++
		}
	}
	l.start = l.pos
}

@ -211,7 +212,7 @@ func lex(l *lexer, input []byte) error {

	l.run()

	if last := l.items[len(l.items)-1]; last.typ == itemError {
	if last := l.items[len(l.items)-1]; last._type == itemError {
		return l.err
	}
	return nil
@ -295,19 +296,17 @@ func lexName(l *lexer) stateFn {
	l.backup()
	s, e := l.current()

	lowercase(l.input, s, e)

	switch {
	case equals(l.input, s, e, queryToken):
		l.emit(itemQuery)
		l.emitL(itemQuery)
	case equals(l.input, s, e, mutationToken):
		l.emit(itemMutation)
		l.emitL(itemMutation)
	case equals(l.input, s, e, subscriptionToken):
		l.emit(itemSub)
		l.emitL(itemSub)
	case equals(l.input, s, e, trueToken):
		l.emit(itemBoolVal)
		l.emitL(itemBoolVal)
	case equals(l.input, s, e, falseToken):
		l.emit(itemBoolVal)
		l.emitL(itemBoolVal)
	default:
		l.emit(itemName)
	}
@ -432,10 +431,10 @@ func lowercase(b []byte, s Pos, e Pos) {
	}
}

func (i *item) String() string {
func (i item) String() string {
	var v string

	switch i.typ {
	switch i._type {
	case itemEOF:
		v = "EOF"
	case itemError:
@ -461,7 +460,7 @@ func (i *item) String() string {
	case itemStringVal:
		v = "string"
	}
	return fmt.Sprintf("%s", v)
	return v
}

/*
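The new emitL lowercases the pending token bytes in place and then emits, which is how the query/mutation/subscription and boolean keywords above become case-insensitive without allocating. A standalone sketch of the ASCII fold that lowercase() applies over l.input[s:e]:

package main

import "fmt"

// lowercaseRange folds A-Z to a-z in place over b[s:e), leaving every
// other byte untouched.
func lowercaseRange(b []byte, s, e int) {
	for i := s; i < e; i++ {
		if b[i] >= 'A' && b[i] <= 'Z' {
			b[i] += 'a' - 'A'
		}
	}
}

func main() {
	in := []byte("MUTATION { id }")
	lowercaseRange(in, 0, 8)
	fmt.Println(string(in)) // mutation { id }
}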
234 qcode/parse.go
@ -16,21 +16,23 @@ var (
type parserType int32

const (
	maxFields = 100
	maxArgs   = 10
	maxFields = 1200
	maxArgs   = 25
)

const (
	parserError parserType = iota
	parserEOF
	opQuery
	opMutate
	opSub
	nodeStr
	nodeInt
	nodeFloat
	nodeBool
	nodeObj
	nodeList
	nodeVar
	NodeStr
	NodeInt
	NodeFloat
	NodeBool
	NodeObj
	NodeList
	NodeVar
)

type Operation struct {
@ -83,7 +85,6 @@ type Parser struct {
	input []byte // the string being scanned
	pos   int
	items []item
	depth int
	err   error
}

@ -146,32 +147,28 @@ func parseSelectionSet(gql []byte) (*Operation, error) {

	if p.peek(itemObjOpen) {
		p.ignore()
	}

	if p.peek(itemName) {
		op = opPool.Get().(*Operation)
		op.Reset()

		op.Type = opQuery
		op.Name = ""
		op.Fields = op.fieldsA[:0]
		op.Args = op.argsA[:0]
		op.Fields, err = p.parseFields(op.Fields)

		op, err = p.parseQueryOp()
	} else {
		op, err = p.parseOp()

		if err != nil {
			return nil, err
		}
	}

	lexPool.Put(l)

	if err != nil {
		return nil, err
	}

	if p.peek(itemObjClose) {
		p.ignore()
	} else {
		return nil, fmt.Errorf("operation missing closing '}'")
	}

	if !p.peek(itemEOF) {
		p.ignore()
		return nil, fmt.Errorf("invalid '%s' found after closing '}'", p.current())
	}

	lexPool.Put(l)

	return op, err
}

@ -179,7 +176,7 @@ func (p *Parser) next() item {
	n := p.pos + 1
	if n >= len(p.items) {
		p.err = errEOT
		return item{typ: itemEOF}
		return item{_type: itemEOF}
	}
	p.pos = n
	return p.items[p.pos]
@ -194,25 +191,21 @@ func (p *Parser) ignore() {
	p.pos = n
}

func (p *Parser) current() item {
	return p.items[p.pos]
}

func (p *Parser) eof() bool {
	n := p.pos + 1
	return p.items[n].typ == itemEOF
func (p *Parser) current() string {
	item := p.items[p.pos]
	return b2s(p.input[item.pos:item.end])
}

func (p *Parser) peek(types ...itemType) bool {
	n := p.pos + 1
	if p.items[n].typ == itemEOF {
		return false
	}
	// if p.items[n]._type == itemEOF {
	// 	return false
	// }
	if n >= len(p.items) {
		return false
	}
	for i := 0; i < len(types); i++ {
		if p.items[n].typ == types[i] {
		if p.items[n]._type == types[i] {
			return true
		}
	}
@ -229,7 +222,7 @@ func (p *Parser) parseOp() (*Operation, error) {
	op := opPool.Get().(*Operation)
	op.Reset()

	switch item.typ {
	switch item._type {
	case itemQuery:
		op.Type = opQuery
	case itemMutation:
@ -249,7 +242,8 @@ func (p *Parser) parseOp() (*Operation, error) {

	if p.peek(itemArgsOpen) {
		p.ignore()
		op.Args, err = p.parseArgs(op.Args)

		op.Args, err = p.parseOpParams(op.Args)
		if err != nil {
			return nil, err
		}
@ -257,6 +251,37 @@ func (p *Parser) parseOp() (*Operation, error) {

	if p.peek(itemObjOpen) {
		p.ignore()

		for n := 0; n < 10; n++ {
			if !p.peek(itemName) {
				break
			}

			op.Fields, err = p.parseFields(op.Fields)
			if err != nil {
				return nil, err
			}
		}
	}

	return op, nil
}

func (p *Parser) parseQueryOp() (*Operation, error) {
	op := opPool.Get().(*Operation)
	op.Reset()

	op.Type = opQuery
	op.Fields = op.fieldsA[:0]
	op.Args = op.argsA[:0]

	var err error

	for n := 0; n < 10; n++ {
		if !p.peek(itemName) {
			break
		}

		op.Fields, err = p.parseFields(op.Fields)
		if err != nil {
			return nil, err
@ -280,11 +305,12 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {

			if st.Len() == 0 {
				break
			} else {
				continue
			}
			continue
		}

		if p.peek(itemName) == false {
		if !p.peek(itemName) {
			return nil, errors.New("expecting an alias or field name")
		}

@ -294,25 +320,32 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
		f.Args = f.argsA[:0]
		f.Children = f.childrenA[:0]

		// Parse the inside of the fields () parentheses,
		// in short, parse the args like id, where, etc
		if err := p.parseField(f); err != nil {
			return nil, err
		}

		if f.ID != 0 {
			intf := st.Peek()
			pid, ok := intf.(int32)

			if !ok {
				return nil, fmt.Errorf("14: unexpected value %v (%t)", intf, intf)
			}

		intf := st.Peek()
		if pid, ok := intf.(int32); ok {
			f.ParentID = pid
			fields[pid].Children = append(fields[pid].Children, f.ID)
		} else {
			f.ParentID = -1
		}

		// After the first opening curly bracket
		// come the columns or child fields
		if p.peek(itemObjOpen) {
			p.ignore()
			st.Push(f.ID)

		} else if p.peek(itemObjClose) {
			if st.Len() == 0 {
				break
			} else {
				continue
			}
		}
	}

@ -321,17 +354,19 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {

func (p *Parser) parseField(f *Field) error {
	var err error
	f.Name = p.val(p.next())
	v := p.next()

	if p.peek(itemColon) {
		p.ignore()

		if p.peek(itemName) {
			f.Alias = f.Name
			f.Name = p.val(p.next())
			f.Alias = p.val(v)
			f.Name = p.vall(p.next())
		} else {
			return errors.New("expecting an aliased field name")
		}
	} else {
		f.Name = p.vall(v)
	}

	if p.peek(itemArgsOpen) {
@ -344,6 +379,22 @@ func (p *Parser) parseField(f *Field) error {
	return nil
}

func (p *Parser) parseOpParams(args []Arg) ([]Arg, error) {
	for {
		if len(args) >= maxArgs {
			return nil, fmt.Errorf("too many args (max %d)", maxArgs)
		}

		if p.peek(itemArgsClose) {
			p.ignore()
			break
		}
		p.next()
	}

	return args, nil
}

func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
	var err error

@ -356,13 +407,14 @@ func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
		p.ignore()
		break
	}
	if p.peek(itemName) == false {

	if !p.peek(itemName) {
		return nil, errors.New("expecting an argument name")
	}
	args = append(args, Arg{Name: p.val(p.next())})
	arg := &args[(len(args) - 1)]

	if p.peek(itemColon) == false {
	if !p.peek(itemColon) {
		return nil, errors.New("missing ':' after argument name")
	}
	p.ignore()
@ -405,7 +457,7 @@ func (p *Parser) parseList() (*Node, error) {
		return nil, errors.New("List cannot be empty")
	}

	parent.Type = nodeList
	parent.Type = NodeList
	parent.Children = nodes

	return parent, nil
@ -423,12 +475,12 @@ func (p *Parser) parseObj() (*Node, error) {
			break
		}

		if p.peek(itemName) == false {
		if !p.peek(itemName) {
			return nil, errors.New("expecting an argument name")
		}
		nodeName := p.val(p.next())

		if p.peek(itemColon) == false {
		if !p.peek(itemColon) {
			return nil, errors.New("missing ':' after Field argument name")
		}
		p.ignore()
@ -442,7 +494,7 @@ func (p *Parser) parseObj() (*Node, error) {
		nodes = append(nodes, node)
	}

	parent.Type = nodeObj
	parent.Type = NodeObj
	parent.Children = nodes

	return parent, nil
@ -463,19 +515,19 @@ func (p *Parser) parseValue() (*Node, error) {
	node := nodePool.Get().(*Node)
	node.Reset()

	switch item.typ {
	switch item._type {
	case itemIntVal:
		node.Type = nodeInt
		node.Type = NodeInt
	case itemFloatVal:
		node.Type = nodeFloat
		node.Type = NodeFloat
	case itemStringVal:
		node.Type = nodeStr
		node.Type = NodeStr
	case itemBoolVal:
		node.Type = nodeBool
		node.Type = NodeBool
	case itemName:
		node.Type = nodeStr
		node.Type = NodeStr
	case itemVariable:
		node.Type = nodeVar
		node.Type = NodeVar
	default:
		return nil, fmt.Errorf("expecting a number, string, object, list or variable as an argument value (not %s)", p.val(p.next()))
	}
@ -488,6 +540,11 @@ func (p *Parser) val(v item) string {
	return b2s(p.input[v.pos:v.end])
}

func (p *Parser) vall(v item) string {
	lowercase(p.input, v.pos, v.end)
	return b2s(p.input[v.pos:v.end])
}

func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
@ -506,24 +563,49 @@ func (t parserType) String() string {
		v = "mutation"
	case opSub:
		v = "subscription"
	case nodeStr:
	case NodeStr:
		v = "node-string"
	case nodeInt:
	case NodeInt:
		v = "node-int"
	case nodeFloat:
	case NodeFloat:
		v = "node-float"
	case nodeBool:
	case NodeBool:
		v = "node-bool"
	case nodeVar:
	case NodeVar:
		v = "node-var"
	case nodeObj:
	case NodeObj:
		v = "node-obj"
	case nodeList:
	case NodeList:
		v = "node-list"
	}
	return fmt.Sprintf("<%s>", v)
}

func FreeNode(n *Node) {
// type Frees struct {
// 	n   *Node
// 	loc int
// }

// var freeList []Frees

// func FreeNode(n *Node, loc int) {
// 	j := -1

// 	for i := range freeList {
// 		if n == freeList[i].n {
// 			j = i
// 			break
// 		}
// 	}

// 	if j == -1 {
// 		nodePool.Put(n)
// 		freeList = append(freeList, Frees{n, loc})
// 	} else {
// 		fmt.Printf(">>>>(%d) RE_FREE %d %p %s %s\n", loc, freeList[j].loc, freeList[j].n, n.Name, n.Type)
// 	}
// }

func FreeNode(n *Node, loc int) {
	nodePool.Put(n)
}
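The b2s helper above is the classic zero-copy []byte-to-string conversion. A standalone sketch of the trick and its one sharp edge: the resulting string aliases the slice's memory, so the bytes must not be mutated while the string is still in use (the parser only lowercases before converting, which keeps it safe):

package main

import (
	"fmt"
	"unsafe"
)

func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	buf := []byte("Products")
	s := b2s(buf)  // no allocation; s shares buf's backing array
	buf[0] = 'p'   // mutations to buf are visible through s
	fmt.Println(s) // products
}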
@ -5,68 +5,44 @@ import (
	"testing"
)

/*
func compareOp(op1, op2 Operation) error {
	if op1.Type != op2.Type {
		return errors.New("operator type mismatch")
	}

	if op1.Name != op2.Name {
		return errors.New("operator name mismatch")
	}

	if len(op1.Args) != len(op2.Args) {
		return errors.New("operator args length mismatch")
	}

	for i := range op1.Args {
		if !reflect.DeepEqual(op1.Args[i], op2.Args[i]) {
			return fmt.Errorf("operator args: %v != %v", op1.Args[i], op2.Args[i])
		}
	}

	if len(op1.Fields) != len(op2.Fields) {
		return errors.New("operator field length mismatch")
	}

	for i := range op1.Fields {
		if !reflect.DeepEqual(op1.Fields[i].Args, op2.Fields[i].Args) {
			return fmt.Errorf("operator field args: %v != %v", op1.Fields[i].Args, op2.Fields[i].Args)
		}
	}

	for i := range op1.Fields {
		if !reflect.DeepEqual(op1.Fields[i].Children, op2.Fields[i].Children) {
			return fmt.Errorf("operator field fields: %v != %v", op1.Fields[i].Children, op2.Fields[i].Children)
		}
	}

	return nil
}
*/

func TestCompile1(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"id", "Name"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err := qcompile.Compile([]byte(`
	product(id: 15) {
	_, err = qc.Compile([]byte(`
	query { product(id: 15) {
		id
		name
	}`))
	} }`), "user")

	if err != nil {
		t.Fatal(err)
	if err == nil {
		t.Fatal(errors.New("this should be an error: id must be a variable"))
	}
}

func TestCompile2(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"ID"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err := qcompile.Compile([]byte(`
	query { product(id: 15) {
	_, err = qc.Compile([]byte(`
	query { product(id: $id) {
		id
		name
	} }`))
	} }`), "user")

	if err != nil {
		t.Fatal(err)
@ -74,15 +50,23 @@ func TestCompile2(t *testing.T) {
}

func TestCompile3(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"ID"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err := qcompile.Compile([]byte(`
	_, err = qc.Compile([]byte(`
	mutation {
		product(id: 15, name: "Test") {
		product(id: $test, name: "Test") {
			id
			name
		}
	}`))
	}`), "user")

	if err != nil {
		t.Fatal(err)
@ -91,7 +75,7 @@ func TestCompile3(t *testing.T) {

func TestInvalidCompile1(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(`#`))
	_, err := qcompile.Compile([]byte(`#`), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
@ -100,7 +84,7 @@ func TestInvalidCompile1(t *testing.T) {

func TestInvalidCompile2(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(`{u(where:{not:0})}`))
	_, err := qcompile.Compile([]byte(`{u(where:{not:0})}`), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
@ -109,13 +93,42 @@ func TestInvalidCompile2(t *testing.T) {

func TestEmptyCompile(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(``))
	_, err := qcompile.Compile([]byte(``), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

func TestInvalidPostfixCompile(t *testing.T) {
	gql := `mutation
	updateThread {
		thread(update: $data, where: { slug: { eq: $slug } }) {
			slug
			title
			published
			createdAt : created_at
			totalVotes : cached_votes_total
			totalPosts : cached_posts_total
			vote : thread_vote(where: { user_id: { eq: $user_id } }) {
				id
			}
			topics {
				slug
				name
			}
		}
	}
	}`
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(gql), "anon")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

var gql = []byte(`
	products(
		# returns only 30 items
@ -144,7 +157,7 @@ func BenchmarkQCompile(b *testing.B) {
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		_, err := qcompile.Compile(gql)
		_, err := qcompile.Compile(gql, "user")

		if err != nil {
			b.Fatal(err)
@ -160,7 +173,7 @@ func BenchmarkQCompileP(b *testing.B) {

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := qcompile.Compile(gql)
			_, err := qcompile.Compile(gql, "user")

			if err != nil {
				b.Fatal(err)
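Taken together, the test changes show the new role-aware API end to end: build a compiler, register a per-role table config, then compile with a role. A minimal sketch; the blocklist entry, table, columns, and filter below are illustrative, and the filter syntax is assumed to follow the where-clause form used in the corpus queries:

package main

import (
	"fmt"

	"github.com/dosco/super-graph/qcode"
)

func main() {
	qc, _ := qcode.NewCompiler(qcode.Config{Blocklist: []string{"password"}})

	err := qc.AddRole("user", "product", qcode.TRConfig{
		Query: qcode.QueryConfig{
			Limit:   10,
			Columns: []string{"id", "name", "price"},
			Filters: []string{`{ user_id: { eq: $user_id } }`},
		},
	})
	if err != nil {
		panic(err)
	}

	compiled, err := qc.Compile([]byte(`query { products { id name } }`), "user")
	fmt.Println(compiled, err)
}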
842 qcode/qcode.go
@ -3,6 +3,7 @@ package qcode
import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"sync"

@ -15,19 +16,42 @@ type Action int

const (
	maxSelectors = 30
)

const (
	QTQuery QType = iota + 1
	QTMutation

	ActionInsert Action = iota + 1
	ActionUpdate
	ActionDelete
	ActionUpsert
	QTInsert
	QTUpdate
	QTDelete
	QTUpsert
)

type QCode struct {
	Type    QType
	Selects []Select
	Type      QType
	ActionVar string
	Selects   []Select
	Roots     []int32
	rootsA    [5]int32
}

type Select struct {
	ID         int32
	ParentID   int32
	Args       map[string]*Node
	Name       string
	FieldName  string
	Cols       []Column
	Where      *Exp
	OrderBy    []*OrderBy
	DistinctOn []string
	Paging     Paging
	Children   []int32
	Functions  bool
	Allowed    map[string]struct{}
	PresetMap  map[string]string
	PresetList []string
	SkipRender bool
}

type Column struct {
@ -36,33 +60,18 @@ type Column struct {
	FieldName string
}

type Select struct {
	ID         int32
	ParentID   int32
	Args       map[string]*Node
	Table      string
	FieldName  string
	Cols       []Column
	Where      *Exp
	OrderBy    []*OrderBy
	DistinctOn []string
	Paging     Paging
	Action     Action
	ActionVar  string
	Children   []int32
}

type Exp struct {
	Op        ExpOp
	Col       string
	NestedCol bool
	Type      ValType
	Val       string
	ListType  ValType
	ListVal   []string
	Children  []*Exp
	childrenA [5]*Exp
	doFree    bool
	Op         ExpOp
	Col        string
	NestedCols []string
	Type       ValType
	Table      string
	Val        string
	ListType   ValType
	ListVal    []string
	Children   []*Exp
	childrenA  [5]*Exp
	doFree     bool
}

var zeroExp = Exp{doFree: true}
@ -76,9 +85,20 @@ type OrderBy struct {
	Order Order
}

type PagingType int

const (
	PtOffset PagingType = iota
	PtForward
	PtBackward
)

type Paging struct {
	Limit  string
	Offset string
	Type    PagingType
	Limit   string
	Offset  string
	Cursor  bool
	NoLimit bool
}

type ExpOp int
@ -110,6 +130,9 @@ const (
	OpIsNull
	OpEqID
	OpTsQuery
	OpFalse
	OpNotDistinct
	OpDistinct
)

type ValType int
@ -122,6 +145,7 @@ const (
	ValList
	ValVar
	ValNone
	ValRef
)

type AggregrateOp int
@ -145,37 +169,9 @@ const (
	OrderDescNullsLast
)

type Filters struct {
	All    map[string][]string
	Query  map[string][]string
	Insert map[string][]string
	Update map[string][]string
	Delete map[string][]string
}

type Config struct {
	DefaultFilter []string
	FilterMap     Filters
	Blocklist     []string
	KeepArgs      bool
}

type Compiler struct {
	df *Exp
	fm struct {
		all    map[string]*Exp
		query  map[string]*Exp
		insert map[string]*Exp
		update map[string]*Exp
		delete map[string]*Exp
	}
	tr map[string]map[string]*trval
	bl map[string]struct{}
	ka bool
}

var opMap = map[parserType]QType{
	opQuery:  QTQuery,
	opMutate: QTMutation,
}

var expPool = sync.Pool{
@ -183,43 +179,12 @@ var expPool = sync.Pool{
}

func NewCompiler(c Config) (*Compiler, error) {
	var err error
	co := &Compiler{ka: c.KeepArgs}

	co := &Compiler{}
	co.tr = make(map[string]map[string]*trval)
	co.bl = make(map[string]struct{}, len(c.Blocklist))

	for i := range c.Blocklist {
		co.bl[c.Blocklist[i]] = struct{}{}
	}

	co.df, err = compileFilter(c.DefaultFilter)
	if err != nil {
		return nil, err
	}

	co.fm.all, err = buildFilters(c.FilterMap.All)
	if err != nil {
		return nil, err
	}

	co.fm.query, err = buildFilters(c.FilterMap.Query)
	if err != nil {
		return nil, err
	}

	co.fm.insert, err = buildFilters(c.FilterMap.Insert)
	if err != nil {
		return nil, err
	}

	co.fm.update, err = buildFilters(c.FilterMap.Update)
	if err != nil {
		return nil, err
	}

	co.fm.delete, err = buildFilters(c.FilterMap.Delete)
	if err != nil {
		return nil, err
		co.bl[strings.ToLower(c.Blocklist[i])] = struct{}{}
	}

	seedExp := [100]Exp{}
@ -232,60 +197,112 @@ func NewCompiler(c Config) (*Compiler, error) {
	return co, nil
}

func buildFilters(filMap map[string][]string) (map[string]*Exp, error) {
	fm := make(map[string]*Exp, len(filMap))
func NewFilter() *Exp {
	ex := expPool.Get().(*Exp)
	ex.Reset()

	for k, v := range filMap {
		fil, err := compileFilter(v)
		if err != nil {
			return nil, err
		}
		singular := flect.Singularize(k)
		plural := flect.Pluralize(k)

		fm[singular] = fil
		fm[plural] = fil
	}

	return fm, nil
	return ex
}

func (com *Compiler) Compile(query []byte) (*QCode, error) {
	var qc QCode
func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
	var err error
	trv := &trval{}

	// query config
	trv.query.fil, trv.query.filNU, err = compileFilter(trc.Query.Filters)
	if err != nil {
		return err
	}
	if trc.Query.Limit > 0 {
		trv.query.limit = strconv.Itoa(trc.Query.Limit)
	}
	trv.query.cols = listToMap(trc.Query.Columns)
	trv.query.disable.funcs = trc.Query.DisableFunctions

	// insert config
	trv.insert.fil, trv.insert.filNU, err = compileFilter(trc.Insert.Filters)
	if err != nil {
		return err
	}
	trv.insert.cols = listToMap(trc.Insert.Columns)
	trv.insert.psmap = parsePresets(trc.Insert.Presets)
	trv.insert.pslist = mapToList(trv.insert.psmap)

	// update config
	trv.update.fil, trv.update.filNU, err = compileFilter(trc.Update.Filters)
	if err != nil {
		return err
	}
	trv.update.cols = listToMap(trc.Update.Columns)
	trv.update.psmap = parsePresets(trc.Update.Presets)
	trv.update.pslist = mapToList(trv.update.psmap)

	// delete config
	trv.delete.fil, trv.delete.filNU, err = compileFilter(trc.Delete.Filters)
	if err != nil {
		return err
	}
	trv.delete.cols = listToMap(trc.Delete.Columns)

	singular := flect.Singularize(table)
	plural := flect.Pluralize(table)

	if _, ok := com.tr[role]; !ok {
		com.tr[role] = make(map[string]*trval)
	}

	com.tr[role][singular] = trv
	com.tr[role][plural] = trv
	return nil
}

func (com *Compiler) Compile(query []byte, role string) (*QCode, error) {
	var err error

	qc := QCode{Type: QTQuery}
	qc.Roots = qc.rootsA[:0]

	op, err := Parse(query)
	if err != nil {
		return nil, err
	}

	qc.Selects, err = com.compileQuery(op)
	if err != nil {
	if err = com.compileQuery(&qc, op, role); err != nil {
		return nil, err
	}

	if t, ok := opMap[op.Type]; ok {
		qc.Type = t
	} else {
		return nil, fmt.Errorf("Unknown operation type %d", op.Type)
	}

	opPool.Put(op)

	return &qc, nil
}

func (com *Compiler) compileQuery(op *Operation) ([]Select, error) {
func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
	id := int32(0)
	parentID := int32(0)

	if len(op.Fields) == 0 {
		return errors.New("invalid graphql: no query found")
	}

	if op.Type == opMutate {
		if err := com.setMutationType(qc, op.Fields[0].Args); err != nil {
			return err
		}
	}

	selects := make([]Select, 0, 5)
	st := NewStack()
	action := qc.Type

	if len(op.Fields) == 0 {
		return nil, errors.New("empty query")
		return errors.New("empty query")
	}

	for i := range op.Fields {
		if op.Fields[i].ParentID == -1 {
			val := op.Fields[i].ID | (-1 << 16)
			st.Push(val)
		}
	}
	st.Push(op.Fields[0].ID)

	for {
		if st.Len() == 0 {
@ -293,41 +310,72 @@ func (com *Compiler) compileQuery(op *Operation) ([]Select, error) {
		}

		if id >= maxSelectors {
			return nil, fmt.Errorf("selector limit reached (%d)", maxSelectors)
			return fmt.Errorf("selector limit reached (%d)", maxSelectors)
		}

		fid := st.Pop()
		val := st.Pop()
		fid := val & 0xFFFF
		parentID := (val >> 16) & 0xFFFF

		field := &op.Fields[fid]

		if _, ok := com.bl[field.Name]; ok {
			continue
		}

		if field.ParentID == -1 {
			parentID = -1
		}

		trv := com.getRole(role, field.Name)

		selects = append(selects, Select{
			ID:       id,
			ParentID: parentID,
			Table:    field.Name,
			Children: make([]int32, 0, 5),
			ID:        id,
			ParentID:  parentID,
			Name:      field.Name,
			Children:  make([]int32, 0, 5),
			Allowed:   trv.allowedColumns(action),
			Functions: true,
		})
		s := &selects[(len(selects) - 1)]

		if s.ID != 0 {
			p := &selects[s.ParentID]
			p.Children = append(p.Children, s.ID)
		switch action {
		case QTQuery:
			s.Functions = !trv.query.disable.funcs
			s.Paging.Limit = trv.query.limit

		case QTInsert:
			s.PresetMap = trv.insert.psmap
			s.PresetList = trv.insert.pslist

		case QTUpdate:
			s.PresetMap = trv.update.psmap
			s.PresetList = trv.update.pslist
		}

		if len(field.Alias) != 0 {
			s.FieldName = field.Alias
		} else {
			s.FieldName = s.Table
			s.FieldName = s.Name
		}

		err := com.compileArgs(s, field.Args)
		err := com.compileArgs(qc, s, field.Args, role)
		if err != nil {
			return nil, err
			return err
		}

		// Order is important: AddFilters must come after compileArgs
		com.AddFilters(qc, s, role)

		if s.ParentID == -1 {
			qc.Roots = append(qc.Roots, s.ID)
		} else {
			p := &selects[s.ParentID]
			p.Children = append(p.Children, s.ID)
		}

		s.Cols = make([]Column, 0, len(field.Children))
		action = QTQuery

		for _, cid := range field.Children {
			f := op.Fields[cid]
@ -337,8 +385,8 @@ func (com *Compiler) compileQuery(op *Operation) ([]Select, error) {
			}

			if len(f.Children) != 0 {
				parentID = s.ID
				st.Push(f.ID)
				val := f.ID | (s.ID << 16)
				st.Push(val)
				continue
			}
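The reworked compileQuery above walks fields with a single int32 per stack entry: the field ID sits in the low 16 bits and the parent ID in the bits above, with roots pushed as -1 << 16. A standalone sketch of the encoding; note that a parent of -1 decodes to 0xFFFF under the mask, which is exactly why the loop re-checks field.ParentID == -1 afterwards:

package main

import "fmt"

// pack stores a field ID in the low 16 bits and its parent ID above.
func pack(fieldID, parentID int32) int32 {
	return fieldID | (parentID << 16)
}

// unpack reverses pack; both halves come back masked to 16 bits.
func unpack(val int32) (fid, pid int32) {
	return val & 0xFFFF, (val >> 16) & 0xFFFF
}

func main() {
	fid, pid := unpack(pack(7, 3))
	fmt.Println(fid, pid) // 7 3

	_, pid = unpack(pack(7, -1))
	fmt.Println(pid) // 65535, not -1: roots need the extra ParentID check
}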
@ -356,124 +404,152 @@ func (com *Compiler) compileQuery(op *Operation) ([]Select, error) {
|
||||
}
|
||||
|
||||
if id == 0 {
|
||||
return nil, errors.New("invalid query")
|
||||
return errors.New("invalid query")
|
||||
}
|
||||
|
||||
var fil *Exp
|
||||
|
||||
root := &selects[0]
|
||||
|
||||
switch op.Type {
|
||||
case opQuery:
|
||||
fil, _ = com.fm.query[root.Table]
|
||||
|
||||
case opMutate:
|
||||
switch root.Action {
|
||||
case ActionInsert:
|
||||
fil, _ = com.fm.insert[root.Table]
|
||||
case ActionUpdate:
|
||||
fil, _ = com.fm.update[root.Table]
|
||||
case ActionDelete:
|
||||
fil, _ = com.fm.delete[root.Table]
|
||||
case ActionUpsert:
|
||||
fil, _ = com.fm.insert[root.Table]
|
||||
}
|
||||
}
|
||||
|
||||
if fil == nil {
|
||||
fil, _ = com.fm.all[root.Table]
|
||||
}
|
||||
|
||||
if fil == nil {
|
||||
fil = com.df
|
||||
}
|
||||
|
||||
if fil != nil && fil.Op != OpNop {
|
||||
if root.Where != nil {
|
||||
ow := root.Where
|
||||
|
||||
root.Where = expPool.Get().(*Exp)
|
||||
root.Where.Reset()
|
||||
root.Where.Op = OpAnd
|
||||
root.Where.Children = root.Where.childrenA[:2]
|
||||
root.Where.Children[0] = fil
|
||||
root.Where.Children[1] = ow
|
||||
} else {
|
||||
root.Where = fil
|
||||
}
|
||||
}
|
||||
|
||||
return selects[:id], nil
|
||||
qc.Selects = selects[:id]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgs(sel *Select, args []Arg) error {
|
||||
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
|
||||
var fil *Exp
|
||||
var nu bool
|
||||
|
||||
if trv, ok := com.tr[role][sel.Name]; ok {
|
||||
fil, nu = trv.filter(qc.Type)
|
||||
|
||||
} else if role == "anon" {
|
||||
// Tables not defined under the anon role will not be rendered
|
||||
sel.SkipRender = true
|
||||
}
|
||||
|
||||
if fil == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if nu && role == "anon" {
|
||||
sel.SkipRender = true
|
||||
}
|
||||
|
||||
switch fil.Op {
|
||||
case OpNop:
|
||||
case OpFalse:
|
||||
sel.Where = fil
|
||||
default:
|
||||
AddFilter(sel, fil)
|
||||
}
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg, role string) error {
|
||||
var err error
|
||||
|
||||
if com.ka {
|
||||
sel.Args = make(map[string]*Node, len(args))
|
||||
}
|
||||
// don't free this arg either previously done or will be free'd
|
||||
// in the future like in psql
|
||||
var df bool
|
||||
|
||||
for i := range args {
|
||||
arg := &args[i]
|
||||
|
||||
switch arg.Name {
|
||||
case "id":
|
||||
if sel.ID == 0 {
|
||||
err = com.compileArgID(sel, arg)
|
||||
}
|
||||
err, df = com.compileArgID(sel, arg)
|
||||
|
||||
case "search":
|
||||
err = com.compileArgSearch(sel, arg)
|
||||
err, df = com.compileArgSearch(sel, arg)
|
||||
|
||||
case "where":
|
||||
err = com.compileArgWhere(sel, arg)
|
||||
err, df = com.compileArgWhere(sel, arg, role)
|
||||
|
||||
case "orderby", "order_by", "order":
|
||||
err = com.compileArgOrderBy(sel, arg)
|
||||
err, df = com.compileArgOrderBy(sel, arg)
|
||||
|
||||
case "distinct_on", "distinct":
|
||||
err = com.compileArgDistinctOn(sel, arg)
|
||||
err, df = com.compileArgDistinctOn(sel, arg)
|
||||
|
||||
case "limit":
|
||||
err = com.compileArgLimit(sel, arg)
|
||||
err, df = com.compileArgLimit(sel, arg)
|
||||
|
||||
case "offset":
|
||||
err = com.compileArgOffset(sel, arg)
|
||||
case "insert":
|
||||
sel.Action = ActionInsert
|
||||
err = com.compileArgAction(sel, arg)
|
||||
case "update":
|
||||
sel.Action = ActionUpdate
|
||||
err = com.compileArgAction(sel, arg)
|
||||
case "upsert":
|
||||
sel.Action = ActionUpsert
|
||||
err = com.compileArgAction(sel, arg)
|
||||
case "delete":
|
||||
sel.Action = ActionDelete
|
||||
err = com.compileArgAction(sel, arg)
|
||||
err, df = com.compileArgOffset(sel, arg)
|
||||
|
||||
case "first":
|
||||
err, df = com.compileArgFirstLast(sel, arg, PtForward)
|
||||
|
||||
case "last":
|
||||
err, df = com.compileArgFirstLast(sel, arg, PtBackward)
|
||||
|
||||
case "after":
|
||||
err, df = com.compileArgAfterBefore(sel, arg, PtForward)
|
||||
|
||||
case "before":
|
||||
err, df = com.compileArgAfterBefore(sel, arg, PtBackward)
|
||||
}
|
||||
|
||||
if !df {
|
||||
FreeNode(arg.Val, 5)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sel.Args != nil {
|
||||
sel.Args[arg.Name] = arg.Val
|
||||
} else {
|
||||
nodePool.Put(arg.Val)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, error) {
|
||||
if arg.Val.Type != nodeObj {
|
||||
return nil, fmt.Errorf("expecting an object")
|
||||
func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
|
||||
setActionVar := func(arg *Arg) error {
|
||||
if arg.Val.Type != NodeVar {
|
||||
return argErr(arg.Name, "variable")
|
||||
}
|
||||
qc.ActionVar = arg.Val.Val
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range args {
|
||||
arg := &args[i]
|
||||
|
||||
switch arg.Name {
|
||||
case "insert":
|
||||
qc.Type = QTInsert
|
||||
return setActionVar(arg)
|
||||
case "update":
|
||||
qc.Type = QTUpdate
|
||||
return setActionVar(arg)
|
||||
case "upsert":
|
||||
qc.Type = QTUpsert
|
||||
return setActionVar(arg)
|
||||
case "delete":
|
||||
qc.Type = QTDelete
|
||||
|
||||
if arg.Val.Type != NodeBool {
|
||||
return argErr(arg.Name, "boolen")
|
||||
}
|
||||
|
||||
if arg.Val.Val == "false" {
|
||||
qc.Type = QTQuery
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, bool, error) {
|
||||
if arg.Val.Type != NodeObj {
|
||||
return nil, false, fmt.Errorf("expecting an object")
|
||||
}
|
||||
|
||||
return com.compileArgNode(st, arg.Val, true)
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
|
||||
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, bool, error) {
|
||||
var root *Exp
|
||||
var needsUser bool
|
||||
|
||||
if node == nil || len(node.Children) == 0 {
|
||||
return nil, errors.New("invalid argument value")
|
||||
return nil, false, errors.New("invalid argument value")
|
||||
}
|
||||
|
||||
pushChild(st, nil, node)
|
||||
@ -484,9 +560,10 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
|
||||
}
|
||||
|
||||
intf := st.Pop()
|
||||
|
||||
node, ok := intf.(*Node)
|
||||
if !ok || node == nil {
|
||||
return nil, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
|
||||
return nil, needsUser, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
|
||||
}
|
||||
|
||||
// Objects inside a list
|
||||
@ -502,122 +579,112 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
|
||||
|
||||
ex, err := newExp(st, node, usePool)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, needsUser, err
|
||||
}
|
||||
|
||||
if ex == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if ex.Type == ValVar && ex.Val == "user_id" {
|
||||
needsUser = true
|
||||
}
|
||||
|
||||
if node.exp == nil {
|
||||
root = ex
|
||||
} else {
|
||||
node.exp.Children = append(node.exp.Children, ex)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if com.ka {
|
||||
return root, nil
|
||||
}
|
||||
if usePool {
|
||||
st.Push(node)
|
||||
|
||||
pushChild(st, nil, node)
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
intf := st.Pop()
|
||||
node, ok := intf.(*Node)
|
||||
if !ok || node == nil {
|
||||
continue
|
||||
}
|
||||
for i := range node.Children {
|
||||
st.Push(node.Children[i])
|
||||
}
|
||||
FreeNode(node, 1)
|
||||
}
|
||||
intf := st.Pop()
|
||||
node, _ := intf.(*Node)
|
||||
|
||||
for i := range node.Children {
|
||||
st.Push(node.Children[i])
|
||||
}
|
||||
nodePool.Put(node)
|
||||
}
|
||||
|
||||
return root, nil
|
||||
return root, needsUser, nil
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
|
||||
func (com *Compiler) compileArgID(sel *Select, arg *Arg) (error, bool) {
|
||||
if sel.ID != 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if sel.Where != nil && sel.Where.Op == OpEqID {
|
||||
return nil
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if arg.Val.Type != NodeVar {
|
||||
return argErr("id", "variable"), false
|
||||
}
|
||||
|
||||
ex := expPool.Get().(*Exp)
|
||||
ex.Reset()
|
||||
|
||||
ex.Op = OpEqID
|
||||
ex.Type = ValVar
|
||||
ex.Val = arg.Val.Val
|
||||
|
||||
switch arg.Val.Type {
|
||||
case nodeStr:
|
||||
ex.Type = ValStr
|
||||
case nodeInt:
|
||||
ex.Type = ValInt
|
||||
case nodeFloat:
|
||||
ex.Type = ValFloat
|
||||
case nodeVar:
|
||||
ex.Type = ValVar
|
||||
default:
|
||||
fmt.Errorf("expecting a string, int, float or variable")
|
||||
}
|
||||
|
||||
sel.Where = ex
|
||||
return nil
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) (error, bool) {
	if arg.Val.Type != NodeVar {
		return argErr("search", "variable"), false
	}

	ex := expPool.Get().(*Exp)
	ex.Reset()

	ex.Op = OpTsQuery
	ex.Type = ValStr
	ex.Type = ValVar
	ex.Val = arg.Val.Val

	if sel.Where != nil {
		ow := sel.Where

		sel.Where = expPool.Get().(*Exp)
		sel.Where.Reset()
		sel.Where.Op = OpAnd
		sel.Where.Children = sel.Where.childrenA[:2]
		sel.Where.Children[0] = ex
		sel.Where.Children[1] = ow
	} else {
		sel.Where = ex
	if sel.Args == nil {
		sel.Args = make(map[string]*Node)
	}
	return nil

	sel.Args[arg.Name] = arg.Val
	AddFilter(sel, ex)

	return nil, true
}
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg, role string) (error, bool) {
	st := util.NewStack()
	var err error

	ex, err := com.compileArgObj(st, arg)
	ex, nu, err := com.compileArgObj(st, arg)
	if err != nil {
		return err
		return err, false
	}

	if sel.Where != nil {
		ow := sel.Where

		sel.Where = expPool.Get().(*Exp)
		sel.Where.Reset()
		sel.Where.Op = OpAnd
		sel.Where.Children = sel.Where.childrenA[:2]
		sel.Where.Children[0] = ex
		sel.Where.Children[1] = ow
	} else {
		sel.Where = ex
	if nu && role == "anon" {
		sel.SkipRender = true
	}
	AddFilter(sel, ex)

	return nil
	return nil, true
}
func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
	if arg.Val.Type != nodeObj {
		return fmt.Errorf("expecting an object")
func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) (error, bool) {
	if arg.Val.Type != NodeObj {
		return fmt.Errorf("expecting an object"), false
	}

	st := util.NewStack()

@@ -635,24 +702,16 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
	node, ok := intf.(*Node)

	if !ok || node == nil {
		return fmt.Errorf("17: unexpected value %v (%t)", intf, intf)
		return fmt.Errorf("17: unexpected value %v (%t)", intf, intf), false
	}

	if _, ok := com.bl[node.Name]; ok {
		if !com.ka {
			nodePool.Put(node)
		}
		FreeNode(node, 2)
		continue
	}

	if node.Type == nodeObj {
		for i := range node.Children {
			st.Push(node.Children[i])
		}
		if !com.ka {
			nodePool.Put(node)
		}
		continue
	if node.Type != NodeStr && node.Type != NodeVar {
		return fmt.Errorf("expecting a string or variable"), false
	}

	ob := &OrderBy{}

@@ -671,85 +730,116 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
	case "desc_nulls_last":
		ob.Order = OrderDescNullsLast
	default:
		return fmt.Errorf("valid values include asc, desc, asc_nulls_first and desc_nulls_first")
		return fmt.Errorf("valid values include asc, desc, asc_nulls_first and desc_nulls_first"), false
	}

	setOrderByColName(ob, node)
	sel.OrderBy = append(sel.OrderBy, ob)

	if !com.ka {
		nodePool.Put(node)
	}
	FreeNode(node, 3)
	}
	return nil
	return nil, false
}
func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) (error, bool) {
	node := arg.Val

	if _, ok := com.bl[node.Name]; ok {
		return nil
		return nil, false
	}

	if node.Type != nodeList && node.Type != nodeStr {
		return fmt.Errorf("expecting a list of strings or just a string")
	if node.Type != NodeList && node.Type != NodeStr {
		return fmt.Errorf("expecting a list of strings or just a string"), false
	}

	if node.Type == nodeStr {
	if node.Type == NodeStr {
		sel.DistinctOn = append(sel.DistinctOn, node.Val)
	}

	for i := range node.Children {
		sel.DistinctOn = append(sel.DistinctOn, node.Children[i].Val)
		if !com.ka {
			nodePool.Put(node.Children[i])
		}
		FreeNode(node.Children[i], 5)
	}

	return nil
	return nil, false
}
func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) (error, bool) {
	node := arg.Val

	if node.Type != nodeInt {
		return fmt.Errorf("expecting an integer")
	if node.Type != NodeInt {
		return argErr("limit", "number"), false
	}

	sel.Paging.Limit = node.Val

	return nil
	return nil, false
}

func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) (error, bool) {
	node := arg.Val

	if node.Type != nodeInt {
		return fmt.Errorf("expecting an integer")
	if node.Type != NodeVar {
		return argErr("offset", "variable"), false
	}

	sel.Paging.Offset = node.Val
	return nil
	return nil, false
}
func (com *Compiler) compileArgAction(sel *Select, arg *Arg) error {
	switch sel.Action {
	case ActionDelete:
		if arg.Val.Type != nodeBool {
			return fmt.Errorf("value for argument '%s' must be a boolean", arg.Name)
		}
		if arg.Val.Val == "false" {
			sel.Action = 0
		}
func (com *Compiler) compileArgFirstLast(sel *Select, arg *Arg, pt PagingType) (error, bool) {
	node := arg.Val

	default:
		if arg.Val.Type != nodeVar {
			return fmt.Errorf("value for argument '%s' must be a variable", arg.Name)
		}
		sel.ActionVar = arg.Val.Val
	if node.Type != NodeInt {
		return argErr(arg.Name, "number"), false
	}

	return nil
	sel.Paging.Type = pt
	sel.Paging.Limit = node.Val

	return nil, false
}

func (com *Compiler) compileArgAfterBefore(sel *Select, arg *Arg, pt PagingType) (error, bool) {
	node := arg.Val

	if node.Type != NodeVar || node.Val != "cursor" {
		return fmt.Errorf("value for argument '%s' must be a variable named $cursor", arg.Name), false
	}
	sel.Paging.Type = pt
	sel.Paging.Cursor = true

	return nil, false
}

var zeroTrv = &trval{}

func (com *Compiler) getRole(role, field string) *trval {
	if trv, ok := com.tr[role][field]; ok {
		return trv
	} else {
		return zeroTrv
	}
}
func AddFilter(sel *Select, fil *Exp) {
	if sel.Where != nil {
		ow := sel.Where

		if sel.Where.Op != OpAnd || !sel.Where.doFree {
			sel.Where = expPool.Get().(*Exp)
			sel.Where.Reset()
			sel.Where.Op = OpAnd
			sel.Where.Children = sel.Where.childrenA[:2]
			sel.Where.Children[0] = fil
			sel.Where.Children[1] = ow

		} else {
			sel.Where.Children = append(sel.Where.Children, fil)
		}

	} else {
		sel.Where = fil
	}
}
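AddFilter merges a new filter into any existing where clause by AND-ing the two instead of replacing one with the other, reusing the top-level AND node when it already owns one. A runnable model of just the merge rule (pooling and the doFree flag are left out):

package main

import "fmt"

// Stand-in for qcode.Exp, reduced to what the merge rule needs.
type Exp struct {
	Op       string
	Children []*Exp
}

func addFilter(where, fil *Exp) *Exp {
	if where == nil {
		return fil // no existing clause: the filter becomes the clause
	}
	if where.Op == "and" {
		where.Children = append(where.Children, fil) // reuse the AND node
		return where
	}
	return &Exp{Op: "and", Children: []*Exp{where, fil}} // wrap both
}

func main() {
	w := addFilter(nil, &Exp{Op: "eq"})
	w = addFilter(w, &Exp{Op: "gt"})
	w = addFilter(w, &Exp{Op: "lt"})
	fmt.Println(w.Op, len(w.Children)) // and 3
}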
func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
@@ -766,6 +856,7 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
	} else {
		ex = &Exp{doFree: false}
	}

	ex.Children = ex.childrenA[:0]

	switch name {
@@ -847,6 +938,12 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
	case "is_null":
		ex.Op = OpIsNull
		ex.Val = node.Val
	case "null_eq", "ndis", "not_distinct":
		ex.Op = OpNotDistinct
		ex.Val = node.Val
	case "null_neq", "dis", "distinct":
		ex.Op = OpDistinct
		ex.Val = node.Val
	default:
		pushChildren(st, node.exp, node)
		return nil, nil // skip node
@@ -854,17 +951,17 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {

	if ex.Op != OpAnd && ex.Op != OpOr && ex.Op != OpNot {
		switch node.Type {
		case nodeStr:
		case NodeStr:
			ex.Type = ValStr
		case nodeInt:
		case NodeInt:
			ex.Type = ValInt
		case nodeBool:
		case NodeBool:
			ex.Type = ValBool
		case nodeFloat:
		case NodeFloat:
			ex.Type = ValFloat
		case nodeList:
		case NodeList:
			ex.Type = ValList
		case nodeVar:
		case NodeVar:
			ex.Type = ValVar
		default:
			return nil, fmt.Errorf("[Where] valid values include string, int, float, boolean and list: %s", node.Type)
@@ -878,13 +975,13 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
func setListVal(ex *Exp, node *Node) {
	if len(node.Children) != 0 {
		switch node.Children[0].Type {
		case nodeStr:
		case NodeStr:
			ex.ListType = ValStr
		case nodeInt:
		case NodeInt:
			ex.ListType = ValInt
		case nodeBool:
		case NodeBool:
			ex.ListType = ValBool
		case nodeFloat:
		case NodeFloat:
			ex.ListType = ValFloat
		}
	}
@@ -897,7 +994,7 @@ func setWhereColName(ex *Exp, node *Node) {
	var list []string

	for n := node.Parent; n != nil; n = n.Parent {
		if n.Type != nodeObj {
		if n.Type != NodeObj {
			continue
		}
		if len(n.Name) != 0 {
@@ -909,12 +1006,13 @@ func setWhereColName(ex *Exp, node *Node) {
			list = append([]string{k}, list...)
		}
	}
	if len(list) == 1 {
		ex.Col = list[0]
	listlen := len(list)

	} else if len(list) > 2 {
		ex.Col = buildPath(list)
		ex.NestedCol = true
	if listlen == 1 {
		ex.Col = list[0]
	} else if listlen > 1 {
		ex.Col = list[listlen-1]
		ex.NestedCols = list[:listlen]
	}
}

@@ -941,27 +1039,40 @@ func pushChildren(st *util.Stack, exp *Exp, node *Node) {
func pushChild(st *util.Stack, exp *Exp, node *Node) {
	node.Children[0].exp = exp
	st.Push(node.Children[0])

}

func compileFilter(filter []string) (*Exp, error) {
func compileFilter(filter []string) (*Exp, bool, error) {
	var fl *Exp
	var needsUser bool

	com := &Compiler{}
	st := util.NewStack()

	if len(filter) == 0 {
		return &Exp{Op: OpNop, doFree: false}, nil
		return &Exp{Op: OpNop, doFree: false}, false, nil
	}

	for i := range filter {
		if filter[i] == "false" {
			return &Exp{Op: OpFalse, doFree: false}, false, nil
		}

		node, err := ParseArgValue(filter[i])
		if err != nil {
			return nil, err
			return nil, false, err
		}
		f, err := com.compileArgNode(st, node, false)
		f, nu, err := com.compileArgNode(st, node, false)
		if err != nil {
			return nil, err
			return nil, false, err
		}
		if nu {
			needsUser = true
		}

		// TODO: Invalid table names in a nested where clause fail silently,
		// returning a nil 'f'; this needs to be fixed.

		// TODO: Invalid where clauses, such as a missing op (eg. eq), also fail silently.

		if fl == nil {
			fl = f
@@ -969,7 +1080,7 @@ func compileFilter(filter []string) (*Exp, error) {
			fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}, doFree: false}
		}
	}
	return fl, nil
	return fl, needsUser, nil
}

func buildPath(a []string) string {
@@ -1056,8 +1167,11 @@ func (t ExpOp) String() string {
}

func FreeExp(ex *Exp) {
	// fmt.Println(">", ex.doFree)
	if ex.doFree {
		expPool.Put(ex)
	}
}

func argErr(name, ty string) error {
	return fmt.Errorf("value for argument '%s' must be a %s", name, ty)
}
qcode/utils.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package qcode

func GetQType(gql string) QType {
	for i := range gql {
		b := gql[i]
		if b == '{' {
			return QTQuery
		}
		if al(b) {
			switch b {
			case 'm', 'M':
				return QTMutation
			case 'q', 'Q':
				return QTQuery
			}
		}
	}
	return -1
}

func al(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
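GetQType scans only as far as the first brace or letter: a leading '{' is the query shorthand, otherwise the first letter decides. Assuming the qcode import path used elsewhere in this diff, usage looks like:

package main

import (
	"fmt"

	"github.com/dosco/super-graph/qcode"
)

func main() {
	fmt.Println(qcode.GetQType("query { users { id } }") == qcode.QTQuery) // true
	fmt.Println(qcode.GetQType("{ users { id } }") == qcode.QTQuery)       // true (shorthand)
	fmt.Println(qcode.GetQType("mutation { users }") == qcode.QTMutation)  // true
}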
@@ -4,8 +4,9 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/Masterminds/semver"
	"github.com/adjust/gorails/marshal"
)

@@ -37,17 +38,20 @@ func NewAuth(version, secret string) (*Auth, error) {
		AuthSalt: authSalt,
	}

	ver, err := semver.NewVersion(version)
	if err != nil {
		return nil, fmt.Errorf("rails auth: %s", err)
	var v1, v2 int
	var err error

	sv := strings.Split(version, ".")
	if len(sv) >= 2 {
		if v1, err = strconv.Atoi(sv[0]); err != nil {
			return nil, err
		}
		if v2, err = strconv.Atoi(sv[1]); err != nil {
			return nil, err
		}
	}

	gt52, err := semver.NewConstraint(">= 5.2")
	if err != nil {
		return nil, fmt.Errorf("rails auth: %s", err)
	}

	if gt52.Check(ver) {
	if v1 >= 5 && v2 >= 2 {
		ra.Cipher = railsCipher52
	} else {
		ra.Cipher = railsCipher
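This drops the semver dependency in favor of a plain MAJOR.MINOR comparison. A runnable sketch of the new check; the helper name useRails52Cipher is hypothetical, not from this diff:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// useRails52Cipher splits "MAJOR.MINOR" and compares numerically,
// mirroring the v1 >= 5 && v2 >= 2 test above.
func useRails52Cipher(version string) bool {
	sv := strings.Split(version, ".")
	if len(sv) < 2 {
		return false
	}
	v1, err1 := strconv.Atoi(sv[0])
	v2, err2 := strconv.Atoi(sv[1])
	if err1 != nil || err2 != nil {
		return false
	}
	return v1 >= 5 && v2 >= 2
}

func main() {
	fmt.Println(useRails52Cipher("5.2.3")) // true
	fmt.Println(useRails52Cipher("4.2"))   // false
}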
scripts/start.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/sh
if [ $1 = "secrets" ]
then
	./sops --config ./config "${@:2}"
	exit 0
fi

if test -f "./config/$SECRETS_FILE"
then
	./sops --config ./config exec-env "./config/$SECRETS_FILE" "$*"
else
	$@
fi
serv/actions.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package serv

import (
	"fmt"
	"net/http"
)

type actionFn func(w http.ResponseWriter, r *http.Request) error

func newAction(a configAction) (http.Handler, error) {
	var fn actionFn
	var err error

	if len(a.SQL) != 0 {
		fn, err = newSQLAction(a)
	} else {
		return nil, fmt.Errorf("invalid config for action '%s'", a.Name)
	}

	if err != nil {
		return nil, err
	}

	httpFn := func(w http.ResponseWriter, r *http.Request) {
		if err := fn(w, r); err != nil {
			errlog.Error().Err(err).Send()
			errorResp(w, err)
		}
	}

	return http.HandlerFunc(httpFn), nil
}

func newSQLAction(a configAction) (actionFn, error) {
	fn := func(w http.ResponseWriter, r *http.Request) error {
		_, err := db.Exec(r.Context(), a.SQL)
		return err
	}

	return fn, nil
}
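A sketch of how a configured SQL action might be wired into a mux. Only configAction's Name and SQL fields appear in this diff; the route pattern and the mountAction helper are assumptions:

package serv

import "net/http"

// mountAction is illustrative only; it is not part of this diff.
func mountAction(mux *http.ServeMux, a configAction) error {
	h, err := newAction(a)
	if err != nil {
		return err
	}
	// Hypothetical route layout; use whatever the server actually mounts.
	mux.Handle("/api/v1/actions/"+a.Name, h)
	return nil
}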
serv/allow.go (deleted, 263 lines)
@@ -1,263 +0,0 @@
package serv

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"sort"
	"strings"
)

const (
	AL_QUERY int = iota + 1
	AL_VARS
)

type allowItem struct {
	uri  string
	gql  string
	vars json.RawMessage
}

var _allowList allowList

type allowList struct {
	list     map[string]*allowItem
	filepath string
	saveChan chan *allowItem
	active   bool
}

func initAllowList(cpath string) {
	_allowList = allowList{
		list:     make(map[string]*allowItem),
		saveChan: make(chan *allowItem),
		active:   true,
	}

	if len(cpath) != 0 {
		fp := path.Join(cpath, "allow.list")

		if _, err := os.Stat(fp); err == nil {
			_allowList.filepath = fp
		} else if !os.IsNotExist(err) {
			logger.Fatal().Err(err).Send()
		}
	}

	if len(_allowList.filepath) == 0 {
		fp := "./allow.list"

		if _, err := os.Stat(fp); err == nil {
			_allowList.filepath = fp
		} else if !os.IsNotExist(err) {
			logger.Fatal().Err(err).Send()
		}
	}

	if len(_allowList.filepath) == 0 {
		fp := "./config/allow.list"

		if _, err := os.Stat(fp); err == nil {
			_allowList.filepath = fp
		} else if !os.IsNotExist(err) {
			logger.Fatal().Err(err).Send()
		}
	}

	if len(_allowList.filepath) == 0 {
		if conf.UseAllowList {
			logger.Fatal().Msg("allow.list not found")
		}

		if len(cpath) == 0 {
			_allowList.filepath = "./config/allow.list"
		} else {
			_allowList.filepath = path.Join(cpath, "allow.list")
		}

		logger.Warn().Msg("allow.list not found")
	} else {
		_allowList.load()
	}

	go func() {
		for v := range _allowList.saveChan {
			_allowList.save(v)
		}
	}()
}

func (al *allowList) add(req *gqlReq) {
	if al.active == false || len(req.ref) == 0 || len(req.Query) == 0 {
		return
	}

	var query string

	for i := 0; i < len(req.Query); i++ {
		c := req.Query[i]
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
			query = req.Query
			break

		} else if c == '{' {
			query = "query " + req.Query
			break
		}
	}

	al.saveChan <- &allowItem{
		uri:  req.ref,
		gql:  query,
		vars: req.Vars,
	}
}

func (al *allowList) load() {
	if al.active == false {
		return
	}

	b, err := ioutil.ReadFile(al.filepath)
	if err != nil {
		log.Fatal(err)
	}

	if len(b) == 0 {
		return
	}

	var uri string
	var varBytes []byte

	s, e, c := 0, 0, 0
	ty := 0

	for {
		if c == 0 && b[e] == '#' {
			s = e
			for e < len(b) && b[e] != '\n' {
				e++
			}
			if (e - s) > 2 {
				uri = strings.TrimSpace(string(b[(s + 1):e]))
			}
		}

		if e >= len(b) {
			break
		}

		if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
			if c == 0 {
				s = e
			}
			ty = AL_QUERY
		} else if matchPrefix(b, e, "variables") {
			if c == 0 {
				s = e + len("variables") + 1
			}
			ty = AL_VARS
		} else if b[e] == '{' {
			c++

		} else if b[e] == '}' {
			c--

			if c == 0 {
				if ty == AL_QUERY {
					q := string(b[s:(e + 1)])

					item := &allowItem{
						uri: uri,
						gql: q,
					}

					if len(varBytes) != 0 {
						item.vars = varBytes
					}

					al.list[gqlHash(q, varBytes)] = item
					varBytes = nil

				} else if ty == AL_VARS {
					varBytes = b[s:(e + 1)]
				}
				ty = 0
			}
		}

		e++
		if e >= len(b) {
			break
		}
	}
}

func (al *allowList) save(item *allowItem) {
	if al.active == false {
		return
	}
	al.list[gqlHash(item.gql, item.vars)] = item

	f, err := os.Create(al.filepath)
	if err != nil {
		logger.Warn().Err(err).Msgf("Failed to write allow list: %s", al.filepath)
		return
	}

	defer f.Close()

	keys := []string{}
	urlMap := make(map[string][]*allowItem)

	for _, v := range al.list {
		urlMap[v.uri] = append(urlMap[v.uri], v)
	}

	for k := range urlMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for i := range keys {
		k := keys[i]
		v := urlMap[k]

		f.WriteString(fmt.Sprintf("# %s\n\n", k))

		for i := range v {
			if len(v[i].vars) != 0 && bytes.Equal(v[i].vars, []byte("{}")) == false {
				vj, err := json.MarshalIndent(v[i].vars, "", "\t")
				if err != nil {
					logger.Warn().Err(err).Msg("Failed to write allow list 'vars' to file")
					continue
				}
				f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
			}

			if v[i].gql[0] == '{' {
				f.WriteString(fmt.Sprintf("query %s\n\n", v[i].gql))
			} else {
				f.WriteString(fmt.Sprintf("%s\n\n", v[i].gql))
			}
		}
	}
}

func matchPrefix(b []byte, i int, s string) bool {
	if (len(b) - i) < len(s) {
		return false
	}
	for n := 0; n < len(s); n++ {
		if b[(i+n)] != s[n] {
			return false
		}
	}
	return true
}
serv/args.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package serv

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"

	"github.com/dosco/super-graph/jsn"
)

func argMap(ctx context.Context, vars []byte) func(w io.Writer, tag string) (int, error) {
	return func(w io.Writer, tag string) (int, error) {
		switch tag {
		case "user_id_provider":
			if v := ctx.Value(userIDProviderKey); v != nil {
				return io.WriteString(w, v.(string))
			}
			return 0, argErr("user_id_provider")

		case "user_id":
			if v := ctx.Value(userIDKey); v != nil {
				return io.WriteString(w, v.(string))
			}
			return 0, argErr("user_id")

		case "user_role":
			if v := ctx.Value(userRoleKey); v != nil {
				return io.WriteString(w, v.(string))
			}
			return 0, argErr("user_role")
		}

		fields := jsn.Get(vars, [][]byte{[]byte(tag)})

		if len(fields) == 0 {
			return 0, argErr(tag)
		}
		v := fields[0].Value

		// Open and close quotes
		if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
			fields[0].Value = v[1 : len(v)-1]
		}

		if tag == "cursor" {
			if bytes.EqualFold(v, []byte("null")) {
				return io.WriteString(w, ``)
			}
			v1, err := decrypt(string(fields[0].Value))
			if err != nil {
				return 0, err
			}

			return w.Write(v1)
		}

		return w.Write(escQuote(fields[0].Value))
	}
}

func argList(ctx *coreContext, args [][]byte) ([]interface{}, error) {
	vars := make([]interface{}, len(args))

	var fields map[string]json.RawMessage
	var err error

	if len(ctx.req.Vars) != 0 {
		fields, _, err = jsn.Tree(ctx.req.Vars)

		if err != nil {
			return nil, err
		}
	}

	for i := range args {
		av := args[i]
		switch {
		case bytes.Equal(av, []byte("user_id")):
			if v := ctx.Value(userIDKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr("user_id")
			}

		case bytes.Equal(av, []byte("user_id_provider")):
			if v := ctx.Value(userIDProviderKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr("user_id_provider")
			}

		case bytes.Equal(av, []byte("user_role")):
			if v := ctx.Value(userRoleKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr("user_role")
			}

		case bytes.Equal(av, []byte("cursor")):
			if v, ok := fields["cursor"]; ok && v[0] == '"' {
				v1, err := decrypt(string(v[1 : len(v)-1]))
				if err != nil {
					return nil, err
				}
				vars[i] = v1
			} else {
				return nil, argErr("cursor")
			}

		default:
			if v, ok := fields[string(av)]; ok {
				switch v[0] {
				case '[', '{':
					vars[i] = escQuote(v)
				default:
					var val interface{}
					if err := json.Unmarshal(v, &val); err != nil {
						return nil, err
					}

					vars[i] = val
				}

			} else {
				return nil, argErr(string(av))
			}
		}
	}

	return vars, nil
}

func escQuote(b []byte) []byte {
	f := false
	for i := range b {
		if b[i] == '\'' {
			f = true
			break
		}
	}
	if !f {
		return b
	}

	buf := &bytes.Buffer{}
	s := 0
	for i := range b {
		if b[i] == '\'' {
			buf.Write(b[s:i])
			buf.WriteString(`''`)
			s = i + 1
		}
	}
	l := len(b)
	if s < l {
		buf.Write(b[s:l])
	}
	return buf.Bytes()
}

func argErr(name string) error {
	return fmt.Errorf("query requires variable '%s' to be set", name)
}
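escQuote doubles single quotes, the standard escape inside a SQL string literal; it only allocates when there is something to escape. Its effect matches the stdlib one-liner:

package main

import (
	"fmt"
	"strings"
)

func main() {
	v := "O'Reilly's"
	fmt.Println(strings.ReplaceAll(v, "'", "''")) // O''Reilly''s
}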
serv/auth.go (96 lines changed)
@@ -3,46 +3,86 @@ package serv
import (
	"context"
	"net/http"
	"strings"
)

var (
	userIDProviderKey = struct{}{}
	userIDKey         = struct{}{}
type ctxkey int

const (
	userIDProviderKey ctxkey = iota
	userIDKey
	userRoleKey
)
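The old struct{}{} keys compare equal to one another, so a lookup with one key could match a value stored under the other; a typed iota enum gives each key a distinct value, and the unexported type prevents collisions with context values set by other packages. A compact demo:

package main

import (
	"context"
	"fmt"
)

type ctxkey int

const (
	userIDKey ctxkey = iota
	userRoleKey
)

func main() {
	ctx := context.WithValue(context.Background(), userIDKey, "42")
	fmt.Println(ctx.Value(userIDKey))   // 42
	fmt.Println(ctx.Value(userRoleKey)) // <nil>: distinct key, no collision
}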
func headerAuth(r *http.Request, c *config) *http.Request {
	if len(c.Auth.Header) == 0 {
		return nil
	}
func headerAuth(authc configAuth, next http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()

	userID := r.Header.Get(c.Auth.Header)
	if len(userID) != 0 {
		ctx := context.WithValue(r.Context(), userIDKey, userID)
		return r.WithContext(ctx)
	}
		userIDProvider := r.Header.Get("X-User-ID-Provider")
		if len(userIDProvider) != 0 {
			ctx = context.WithValue(ctx, userIDProviderKey, userIDProvider)
		}

	return nil
		userID := r.Header.Get("X-User-ID")
		if len(userID) != 0 {
			ctx = context.WithValue(ctx, userIDKey, userID)
		}

		userRole := r.Header.Get("X-User-Role")
		if len(userRole) != 0 {
			ctx = context.WithValue(ctx, userRoleKey, userRole)
		}

		next.ServeHTTP(w, r.WithContext(ctx))
	}
}

func withAuth(next http.HandlerFunc) http.HandlerFunc {
	at := conf.Auth.Type
	ru := conf.Auth.Rails.URL
func headerHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	hdr := authc.Header

	switch at {
	if len(hdr.Name) == 0 {
		errlog.Fatal().Str("auth", authc.Name).Msg("no header.name defined")
	}

	if !hdr.Exists && len(hdr.Value) == 0 {
		errlog.Fatal().Str("auth", authc.Name).Msg("no header.value defined")
	}

	return func(w http.ResponseWriter, r *http.Request) {
		var fo1 bool
		value := r.Header.Get(hdr.Name)

		switch {
		case hdr.Exists:
			fo1 = (len(value) == 0)

		default:
			fo1 = (value != hdr.Value)
		}

		if fo1 {
			http.Error(w, "401 unauthorized", http.StatusUnauthorized)
			return
		}

		next.ServeHTTP(w, r)
	}
}
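headerHandler fails a request in one of two modes: the header must merely exist, or it must equal a configured value. A stand-alone model of that check (headerConf is a stand-in for the real config struct):

package main

import "fmt"

// Only the three fields headerHandler reads.
type headerConf struct {
	Name   string
	Value  string
	Exists bool
}

// failsAuth mirrors the fo1 logic above.
func failsAuth(hc headerConf, got string) bool {
	if hc.Exists {
		return got == "" // header only has to be present
	}
	return got != hc.Value // header has to match exactly
}

func main() {
	hc := headerConf{Name: "X-Api-Key", Value: "super-secret"}
	fmt.Println(failsAuth(hc, "super-secret")) // false: request allowed
	fmt.Println(failsAuth(hc, ""))             // true: 401
}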
func withAuth(next http.Handler, authc configAuth) http.Handler {
	if authc.CredsInHeader {
		next = headerAuth(authc, next)
	}

	switch authc.Type {
	case "rails":
		if strings.HasPrefix(ru, "memcache:") {
			return railsMemcacheHandler(next)
		}

		if strings.HasPrefix(ru, "redis:") {
			return railsRedisHandler(next)
		}

		return railsCookieHandler(next)
		return railsHandler(authc, next)

	case "jwt":
		return jwtHandler(next)
		return jwtHandler(authc, next)

	case "header":
		return headerHandler(authc, next)

	}

	return next
@@ -11,22 +11,21 @@ import (

const (
	authHeader = "Authorization"
	jwtBase int = iota
	jwtAuth0
	jwtAuth0 int = iota + 1
)

func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
func jwtHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	var key interface{}
	var jwtProvider int

	cookie := conf.Auth.Cookie
	cookie := authc.Cookie

	if conf.Auth.JWT.Provider == "auth0" {
	if authc.JWT.Provider == "auth0" {
		jwtProvider = jwtAuth0
	}

	secret := conf.Auth.JWT.Secret
	publicKeyFile := conf.Auth.JWT.PubKeyFile
	secret := authc.JWT.Secret
	publicKeyFile := authc.JWT.PubKeyFile

	switch {
	case len(secret) != 0:
@@ -35,10 +34,10 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
	case len(publicKeyFile) != 0:
		kd, err := ioutil.ReadFile(publicKeyFile)
		if err != nil {
			logger.Fatal().Err(err).Send()
			errlog.Fatal().Err(err).Send()
		}

		switch conf.Auth.JWT.PubKeyType {
		switch authc.JWT.PubKeyType {
		case "ecdsa":
			key, err = jwt.ParseECPublicKeyFromPEM(kd)

@@ -51,18 +50,13 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
		}

		if err != nil {
			logger.Fatal().Err(err).Send()
			errlog.Fatal().Err(err).Send()
		}
	}

	return func(w http.ResponseWriter, r *http.Request) {
		var tok string

		if rn := headerAuth(r, conf); rn != nil {
			next.ServeHTTP(w, rn)
			return
		}

		if len(cookie) != 0 {
			ck, err := r.Cookie(cookie)
			if err != nil {
@@ -100,7 +94,9 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
			} else {
				ctx = context.WithValue(ctx, userIDKey, claims.Subject)
			}

			next.ServeHTTP(w, r.WithContext(ctx))
			return
		}

		next.ServeHTTP(w, r)
@@ -6,35 +6,50 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/bradfitz/gomemcache/memcache"
	"github.com/dosco/super-graph/rails"
	"github.com/garyburd/redigo/redis"
)

func railsRedisHandler(next http.HandlerFunc) http.HandlerFunc {
	cookie := conf.Auth.Cookie
	if len(cookie) == 0 {
		logger.Fatal().Msg("no auth.cookie defined")
func railsHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	ru := authc.Rails.URL

	if strings.HasPrefix(ru, "memcache:") {
		return railsMemcacheHandler(authc, next)
	}

	if len(conf.Auth.Rails.URL) == 0 {
		logger.Fatal().Msg("no auth.rails.url defined")
	if strings.HasPrefix(ru, "redis:") {
		return railsRedisHandler(authc, next)
	}

	return railsCookieHandler(authc, next)
}

func railsRedisHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	cookie := authc.Cookie
	if len(cookie) == 0 {
		errlog.Fatal().Msg("no auth.cookie defined")
	}

	if len(authc.Rails.URL) == 0 {
		errlog.Fatal().Msg("no auth.rails.url defined")
	}

	rp := &redis.Pool{
		MaxIdle:   conf.Auth.Rails.MaxIdle,
		MaxActive: conf.Auth.Rails.MaxActive,
		MaxIdle:   authc.Rails.MaxIdle,
		MaxActive: authc.Rails.MaxActive,
		Dial: func() (redis.Conn, error) {
			c, err := redis.DialURL(conf.Auth.Rails.URL)
			c, err := redis.DialURL(authc.Rails.URL)
			if err != nil {
				logger.Fatal().Err(err).Send()
				errlog.Fatal().Err(err).Send()
			}

			pwd := conf.Auth.Rails.Password
			pwd := authc.Rails.Password
			if len(pwd) != 0 {
				if _, err := c.Do("AUTH", pwd); err != nil {
					logger.Fatal().Err(err).Send()
					errlog.Fatal().Err(err).Send()
				}
			}
			return c, err
@@ -42,11 +57,6 @@ func railsRedisHandler(next http.HandlerFunc) http.HandlerFunc {
	}

	return func(w http.ResponseWriter, r *http.Request) {
		if rn := headerAuth(r, conf); rn != nil {
			next.ServeHTTP(w, rn)
			return
		}

		ck, err := r.Cookie(cookie)
		if err != nil {
			next.ServeHTTP(w, r)
@@ -71,29 +81,24 @@ func railsRedisHandler(next http.HandlerFunc) http.HandlerFunc {
	}
}

func railsMemcacheHandler(next http.HandlerFunc) http.HandlerFunc {
	cookie := conf.Auth.Cookie
func railsMemcacheHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	cookie := authc.Cookie
	if len(cookie) == 0 {
		logger.Fatal().Msg("no auth.cookie defined")
		errlog.Fatal().Msg("no auth.cookie defined")
	}

	if len(conf.Auth.Rails.URL) == 0 {
		logger.Fatal().Msg("no auth.rails.url defined")
	if len(authc.Rails.URL) == 0 {
		errlog.Fatal().Msg("no auth.rails.url defined")
	}

	rURL, err := url.Parse(conf.Auth.Rails.URL)
	rURL, err := url.Parse(authc.Rails.URL)
	if err != nil {
		logger.Fatal().Err(err)
		errlog.Fatal().Err(err).Send()
	}

	mc := memcache.New(rURL.Host)

	return func(w http.ResponseWriter, r *http.Request) {
		if rn := headerAuth(r, conf); rn != nil {
			next.ServeHTTP(w, rn)
			return
		}

		ck, err := r.Cookie(cookie)
		if err != nil {
			next.ServeHTTP(w, r)
@@ -118,33 +123,28 @@ func railsMemcacheHandler(next http.HandlerFunc) http.HandlerFunc {
	}
}

func railsCookieHandler(next http.HandlerFunc) http.HandlerFunc {
	cookie := conf.Auth.Cookie
func railsCookieHandler(authc configAuth, next http.Handler) http.HandlerFunc {
	cookie := authc.Cookie
	if len(cookie) == 0 {
		logger.Fatal().Msg("no auth.cookie defined")
		errlog.Fatal().Msg("no auth.cookie defined")
	}

	ra, err := railsAuth(conf)
	ra, err := railsAuth(authc)
	if err != nil {
		logger.Fatal().Err(err)
		errlog.Fatal().Err(err).Send()
	}

	return func(w http.ResponseWriter, r *http.Request) {
		if rn := headerAuth(r, conf); rn != nil {
			next.ServeHTTP(w, rn)
			return
		}

		ck, err := r.Cookie(cookie)
		if err != nil {
			logger.Error().Err(err)
		if err != nil || len(ck.Value) == 0 {
			logger.Warn().Err(err).Msg("rails cookie missing")
			next.ServeHTTP(w, r)
			return
		}

		userID, err := ra.ParseCookie(ck.Value)
		if err != nil {
			logger.Error().Err(err)
			logger.Warn().Err(err).Msg("failed to parse rails cookie")
			next.ServeHTTP(w, r)
			return
		}
@@ -154,13 +154,13 @@ func railsCookieHandler(next http.HandlerFunc) http.HandlerFunc {
	}
}

func railsAuth(c *config) (*rails.Auth, error) {
	secret := c.Auth.Rails.SecretKeyBase
func railsAuth(authc configAuth) (*rails.Auth, error) {
	secret := authc.Rails.SecretKeyBase
	if len(secret) == 0 {
		return nil, errors.New("no auth.rails.secret_key_base defined")
	}

	version := c.Auth.Rails.Version
	version := authc.Rails.Version
	if len(version) == 0 {
		return nil, errors.New("no auth.rails.version defined")
	}
@@ -170,16 +170,16 @@ func railsAuth(c *config) (*rails.Auth, error) {
		return nil, err
	}

	if len(c.Auth.Rails.Salt) != 0 {
		ra.Salt = c.Auth.Rails.Salt
	if len(authc.Rails.Salt) != 0 {
		ra.Salt = authc.Rails.Salt
	}

	if len(conf.Auth.Rails.SignSalt) != 0 {
		ra.SignSalt = c.Auth.Rails.SignSalt
	if len(authc.Rails.SignSalt) != 0 {
		ra.SignSalt = authc.Rails.SignSalt
	}

	if len(conf.Auth.Rails.AuthSalt) != 0 {
		ra.AuthSalt = c.Auth.Rails.AuthSalt
	if len(authc.Rails.AuthSalt) != 0 {
		ra.AuthSalt = authc.Rails.AuthSalt
	}

	return ra, nil
serv/cmd.go (214 lines changed)
@@ -1,16 +1,13 @@
package serv

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"strings"

	"github.com/dosco/super-graph/allow"
	"github.com/dosco/super-graph/psql"
	"github.com/dosco/super-graph/qcode"
	"github.com/gobuffalo/flect"
	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/log/zerologadapter"
	"github.com/jackc/pgx/v4/pgxpool"
	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
@@ -21,28 +18,36 @@ import (

const (
	serverName = "Super Graph"

	authFailBlockAlways = iota + 1
	authFailBlockPerQuery
	authFailBlockNever
)

var (
	logger *zerolog.Logger
	conf *config
	confPath string
	db *pgxpool.Pool
	qcompile *qcode.Compiler
	pcompile *psql.Compiler
	authFailBlock int
	// These variables are set using -ldflags
	version string
	gitBranch string
	lastCommitSHA string
	lastCommitTime string
)

func Init() {
	logger = initLog()
var (
	logger      zerolog.Logger  // logger for everything but errors
	errlog      zerolog.Logger  // logger for errors includes line numbers
	conf        *config         // parsed config
	confPath    string          // path to the config file
	db          *pgxpool.Pool   // database connection pool
	schema      *psql.DBSchema  // database tables, columns and relationships
	allowList   *allow.List     // allow.list contains queries allowed in production
	qcompile    *qcode.Compiler // qcode compiler
	pcompile    *psql.Compiler  // postgres sql compiler
	secretKey   [32]byte        // encryption key
	internalKey [32]byte        // encryption key used for internal needs
)

func Cmd() {
	initLog()

	rootCmd := &cobra.Command{
		Use: "super-graph",
		Short: "An instant high-performance GraphQL API. No code needed. https://supergraph.dev",
		Short: BuildDetails(),
	}

	rootCmd.AddCommand(&cobra.Command{
@@ -117,6 +122,13 @@ e.g. db:migrate -+1
		Run: cmdDBSetup,
	})

	rootCmd.AddCommand(&cobra.Command{
		Use:   "db:reset",
		Short: "Reset database",
		Long:  "This command will drop, create, migrate and seed the database (won't run in production)",
		Run:   cmdDBReset,
	})

	rootCmd.AddCommand(&cobra.Command{
		Use:   "new APP-NAME",
		Short: "Create a new application",
@@ -131,154 +143,40 @@ e.g. db:migrate -+1
		Run: cmdConfDump,
	})

	rootCmd.AddCommand(&cobra.Command{
		Use:   "version",
		Short: "Super Graph binary version information",
		Run:   cmdVersion,
	})

	rootCmd.Flags().StringVar(&confPath,
		"path", "./config", "path to config files")

	if err := rootCmd.Execute(); err != nil {
		logger.Fatal().Err(err).Send()
		errlog.Fatal().Err(err).Send()
	}
}

func initLog() *zerolog.Logger {
	out := zerolog.ConsoleWriter{Out: os.Stderr}
	logger := zerolog.New(out).
		With().
		Timestamp().
		Caller().
		Logger()

	return &logger
func cmdVersion(cmd *cobra.Command, args []string) {
	fmt.Printf("%s\n", BuildDetails())
}

func initConf() (*config, error) {
	vi := newConfig()
func BuildDetails() string {
	return fmt.Sprintf(`
Super Graph %v
For documentation, visit https://supergraph.dev

	if err := vi.ReadInConfig(); err != nil {
		return nil, err
	}
Commit SHA-1      : %v
Commit timestamp  : %v
Branch            : %v
Go version        : %v

	c := &config{}

	if err := vi.Unmarshal(c); err != nil {
		return nil, fmt.Errorf("unable to decode config, %v", err)
	}

	if len(c.Tables) == 0 {
		c.Tables = c.DB.Tables
	}

	for k, v := range c.Inflections {
		flect.AddPlural(k, v)
	}

	for i := range c.Tables {
		t := c.Tables[i]
		t.Name = flect.Pluralize(strings.ToLower(t.Name))
	}

	authFailBlock = getAuthFailBlock(c)

	logLevel, err := zerolog.ParseLevel(c.LogLevel)
	if err != nil {
		logger.Error().Err(err).Msg("error setting log_level")
	}
	zerolog.SetGlobalLevel(logLevel)

	//fmt.Printf("%#v", c)

	return c, nil
}

func initDB(c *config, useDB bool) (*pgx.Conn, error) {
	config, _ := pgx.ParseConfig("")
	config.Host = c.DB.Host
	config.Port = c.DB.Port
	config.User = c.DB.User
	config.Password = c.DB.Password
	config.RuntimeParams = map[string]string{
		"application_name": c.AppName,
		"search_path":      c.DB.Schema,
	}

	if useDB {
		config.Database = c.DB.DBName
	}

	switch c.LogLevel {
	case "debug":
		config.LogLevel = pgx.LogLevelDebug
	case "info":
		config.LogLevel = pgx.LogLevelInfo
	case "warn":
		config.LogLevel = pgx.LogLevelWarn
	case "error":
		config.LogLevel = pgx.LogLevelError
	default:
		config.LogLevel = pgx.LogLevelNone
	}

	config.Logger = zerologadapter.NewLogger(*logger)

	db, err := pgx.ConnectConfig(context.Background(), config)
	if err != nil {
		return nil, err
	}

	return db, nil
}

func initDBPool(c *config) (*pgxpool.Pool, error) {
	config, _ := pgxpool.ParseConfig("")
	config.ConnConfig.Host = c.DB.Host
	config.ConnConfig.Port = c.DB.Port
	config.ConnConfig.Database = c.DB.DBName
	config.ConnConfig.User = c.DB.User
	config.ConnConfig.Password = c.DB.Password
	config.ConnConfig.RuntimeParams = map[string]string{
		"application_name": c.AppName,
		"search_path":      c.DB.Schema,
	}

	switch c.LogLevel {
	case "debug":
		config.ConnConfig.LogLevel = pgx.LogLevelDebug
	case "info":
		config.ConnConfig.LogLevel = pgx.LogLevelInfo
	case "warn":
		config.ConnConfig.LogLevel = pgx.LogLevelWarn
	case "error":
		config.ConnConfig.LogLevel = pgx.LogLevelError
	default:
		config.ConnConfig.LogLevel = pgx.LogLevelNone
	}

	config.ConnConfig.Logger = zerologadapter.NewLogger(*logger)

	// if c.DB.MaxRetries != 0 {
	// 	opt.MaxRetries = c.DB.MaxRetries
	// }

	if c.DB.PoolSize != 0 {
		config.MaxConns = conf.DB.PoolSize
	}

	db, err := pgxpool.ConnectConfig(context.Background(), config)
	if err != nil {
		return nil, err
	}

	return db, nil
}
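initDBPool maps the app config onto pgx's pool config. A minimal sketch with hard-coded placeholder values standing in for the config struct:

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4/pgxpool"
)

func main() {
	config, err := pgxpool.ParseConfig("")
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder connection details; initDBPool reads these from conf.DB.
	config.ConnConfig.Host = "localhost"
	config.ConnConfig.Port = 5432
	config.ConnConfig.Database = "app_development"
	config.ConnConfig.User = "postgres"
	config.MaxConns = 10

	db, err := pgxpool.ConnectConfig(context.Background(), config)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}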
func initCompiler() {
	var err error

	qcompile, pcompile, err = initCompilers(conf)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to initialize compilers")
	}

	if err := initResolvers(); err != nil {
		logger.Fatal().Err(err).Msg("failed to initialize resolvers")
	}
Licensed under the Apache Public License 2.0
Copyright 2020, Vikram Rangnekar.
`,
		version,
		lastCommitSHA,
		lastCommitTime,
		gitBranch,
		runtime.Version())
}
@@ -9,20 +9,19 @@ import (

func cmdConfDump(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cmd.Help()
		cmd.Help() //nolint: errcheck
		os.Exit(1)
	}

	fname := fmt.Sprintf("%s.%s", getConfigName(), args[0])

	vi := newConfig()

	if err := vi.ReadInConfig(); err != nil {
		logger.Fatal().Err(err).Send()
	conf, err := initConf()
	if err != nil {
		errlog.Fatal().Err(err).Msg("failed to read config")
	}

	if err := vi.WriteConfigAs(fname); err != nil {
		logger.Fatal().Err(err).Send()
	if err := conf.Viper.WriteConfigAs(fname); err != nil {
		errlog.Fatal().Err(err).Send()
	}

	logger.Info().Msgf("config dumped to ./%s", fname)
@@ -14,19 +14,6 @@ import (
	"github.com/spf13/cobra"
)

var sampleMigration = `-- This is a sample migration.

create table users(
	id serial primary key,
	fullname varchar not null,
	email varchar not null
);

---- create above / drop below ----

drop table users;
`

var newMigrationText = `-- Write your migrate up statements here

---- create above / drop below ----
@@ -36,6 +23,7 @@ var newMigrationText = `-- Write your migrate up statements here
`

func cmdDBSetup(cmd *cobra.Command, args []string) {
	initConfOnce()
	cmdDBCreate(cmd, []string{})
	cmdDBMigrate(cmd, []string{"up"})

@@ -47,58 +35,59 @@ func cmdDBSetup(cmd *cobra.Command, args []string) {
		return
	}

	if os.IsNotExist(err) == false {
		logger.Fatal().Err(err).Msgf("unable to check if '%s' exists", sfile)
	if !os.IsNotExist(err) {
		errlog.Fatal().Err(err).Msgf("unable to check if '%s' exists", sfile)
	}

	logger.Warn().Msgf("failed to read seed file '%s'", sfile)
}

func cmdDBCreate(cmd *cobra.Command, args []string) {
	var err error
func cmdDBReset(cmd *cobra.Command, args []string) {
	initConfOnce()

	if conf, err = initConf(); err != nil {
		logger.Fatal().Err(err).Msg("failed to read config")
	if conf.Production {
		errlog.Fatal().Msg("db:reset does not work in production")
		return
	}
	cmdDBDrop(cmd, []string{})
	cmdDBSetup(cmd, []string{})
}

func cmdDBCreate(cmd *cobra.Command, args []string) {
	initConfOnce()
	ctx := context.Background()

	conn, err := initDB(conf, false)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to connect to database")
		errlog.Fatal().Err(err).Msg("failed to connect to database")
	}
	defer conn.Close(ctx)

	sql := fmt.Sprintf("CREATE DATABASE %s", conf.DB.DBName)
	sql := fmt.Sprintf(`CREATE DATABASE "%s"`, conf.DB.DBName)

	_, err = conn.Exec(ctx, sql)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to create database")
		errlog.Fatal().Err(err).Msg("failed to create database")
	}

	logger.Info().Msgf("created database '%s'", conf.DB.DBName)
}

func cmdDBDrop(cmd *cobra.Command, args []string) {
	var err error

	if conf, err = initConf(); err != nil {
		logger.Fatal().Err(err).Msg("failed to read config")
	}

	initConfOnce()
	ctx := context.Background()

	conn, err := initDB(conf, false)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to connect to database")
		errlog.Fatal().Err(err).Msg("failed to connect to database")
	}
	defer conn.Close(ctx)

	sql := fmt.Sprintf(`DROP DATABASE IF EXISTS %s`, conf.DB.DBName)
	sql := fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, conf.DB.DBName)

	_, err = conn.Exec(ctx, sql)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to create database")
		errlog.Fatal().Err(err).Msg("failed to drop database")
	}

	logger.Info().Msgf("dropped database '%s'", conf.DB.DBName)
@@ -106,16 +95,11 @@ func cmdDBDrop(cmd *cobra.Command, args []string) {

func cmdDBNew(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cmd.Help()
		cmd.Help() //nolint: errcheck
		os.Exit(1)
	}

	var err error

	if conf, err = initConf(); err != nil {
		logger.Fatal().Err(err).Msg("failed to read config")
	}

	initConfOnce()
	name := args[0]

	m, err := migrate.FindMigrations(conf.MigrationsPath)
@@ -124,7 +108,7 @@ func cmdDBNew(cmd *cobra.Command, args []string) {
		os.Exit(1)
	}

	mname := fmt.Sprintf("%03d_%s.sql", len(m)+100, name)
	mname := fmt.Sprintf("%d_%s.sql", len(m), name)

	// Write new migration
	mpath := filepath.Join(conf.MigrationsPath, mname)
@@ -144,39 +128,34 @@ func cmdDBNew(cmd *cobra.Command, args []string) {
}

func cmdDBMigrate(cmd *cobra.Command, args []string) {
	var err error

	if len(args) == 0 {
		cmd.Help()
		cmd.Help() //nolint: errcheck
		os.Exit(1)
	}

	initConfOnce()
	dest := args[0]

	if conf, err = initConf(); err != nil {
		logger.Fatal().Err(err).Msg("failed to read config")
	}

	conn, err := initDB(conf, true)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to connect to database")
		errlog.Fatal().Err(err).Msg("failed to connect to database")
	}
	defer conn.Close(context.Background())

	m, err := migrate.NewMigrator(conn, "schema_version")
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to initialize migrator")
		errlog.Fatal().Err(err).Msg("failed to initialize migrator")
	}

	m.Data = getMigrationVars()

	err = m.LoadMigrations(conf.MigrationsPath)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to load migrations")
		errlog.Fatal().Err(err).Msg("failed to load migrations")
	}

	if len(m.Migrations) == 0 {
		logger.Fatal().Msg("No migrations found")
		errlog.Fatal().Msg("No migrations found")
	}

	m.OnStart = func(sequence int32, name, direction, sql string) {
@@ -195,7 +174,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
		var n int64
		n, err = strconv.ParseInt(d, 10, 32)
		if err != nil {
			logger.Fatal().Err(err).Msg("invalid destination")
			errlog.Fatal().Err(err).Msg("invalid destination")
		}
		return int32(n)
	}
@@ -219,24 +198,22 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
		err = m.MigrateTo(currentVersion + mustParseDestination(dest[1:]))

	} else {
		cmd.Help()
		cmd.Help() //nolint: errcheck
		os.Exit(1)
	}

	if err != nil {
		logger.Info().Err(err).Send()

		// logger.Info().Err(err).Send()
		logger.Fatal().Err(err).Send()

		// if err, ok := err.(m.MigrationPgError); ok {
		// 	if err.Detail != "" {
		// 		logger.Info().Err(err).Msg(err.Detail)
		// 		info.Err(err).Msg(err.Detail)
		// 	}

		// 	if err.Position != 0 {
		// 		ele, err := ExtractErrorLine(err.Sql, int(err.Position))
		// 		if err != nil {
		// 			logger.Fatal().Err(err).Send()
		// 			errlog.Fatal().Err(err).Send()
		// 		}

		// 		prefix := fmt.Sprintf()
@@ -251,37 +228,33 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
}

func cmdDBStatus(cmd *cobra.Command, args []string) {
	var err error

	if conf, err = initConf(); err != nil {
		logger.Fatal().Err(err).Msg("failed to read config")
	}
	initConfOnce()

	conn, err := initDB(conf, true)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to connect to database")
		errlog.Fatal().Err(err).Msg("failed to connect to database")
	}
	defer conn.Close(context.Background())

	m, err := migrate.NewMigrator(conn, "schema_version")
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to initialize migrator")
		errlog.Fatal().Err(err).Msg("failed to initialize migrator")
	}

	m.Data = getMigrationVars()

	err = m.LoadMigrations(conf.MigrationsPath)
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to load migrations")
		errlog.Fatal().Err(err).Msg("failed to load migrations")
	}

	if len(m.Migrations) == 0 {
		logger.Fatal().Msg("no migrations found")
		errlog.Fatal().Msg("no migrations found")
	}

	mver, err := m.GetCurrentVersion()
	if err != nil {
		logger.Fatal().Err(err).Msg("failed to retrieve migration")
		errlog.Fatal().Err(err).Msg("failed to retrieve migration")
	}

	var status string
@@ -338,3 +311,13 @@ func getMigrationVars() map[string]interface{} {
		"env": strings.ToLower(os.Getenv("GO_ENV")),
	}
}

func initConfOnce() {
	var err error

	if conf == nil {
		if conf, err = initConf(); err != nil {
			errlog.Fatal().Err(err).Msg("failed to read config")
		}
	}
}