Compare commits
No commits in common. "master" and "v0.8" have entirely different histories.
@@ -1,49 +0,0 @@
{{ if .Versions -}}
<a name="unreleased"></a>
## [Unreleased]

{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>
## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}

{{- if .MergeCommits -}}
### Pull Requests
{{ range .MergeCommits -}}
- {{ .Header }}
{{ end }}
{{ end -}}

{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}
@@ -1,27 +0,0 @@
style: github
template: CHANGELOG.tpl.md
info:
  title: CHANGELOG
  repository_url: https://github.com/dosco/super-graph
options:
  commits:
    filters:
      Type:
        - feat
        - fix
        - perf
        - refactor
  commit_groups:
    title_maps:
      feat: Features
      fix: Bug Fixes
      perf: Performance Improvements
      refactor: Code Refactoring
  header:
    pattern: "^((\\w+)\\s.*)$"
    pattern_maps:
      - Subject
      - Type
  notes:
    keywords:
      - BREAKING CHANGE
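This configuration drives git-chglog, which the Makefile later in this diff installs and invokes through its `changelog` target. A minimal usage sketch (the `.chglog/config.yml` location is an assumption based on the tool's default layout):

```bash
# regenerate the changelog via the Makefile wrapper...
make changelog
# ...or call git-chglog directly, writing the result to CHANGELOG.md
git-chglog -o CHANGELOG.md
```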
@@ -1,8 +0,0 @@
version = 1

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_path = "github.com/dosco/super-graph"
.github/ISSUE_TEMPLATE/bug_report.md (24 changes)
@@ -1,24 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

<!-- If you suspect this could be a bug, follow the template. -->

### What version of Super Graph are you using? `super-graph version`


### Have you tried reproducing the issue with the latest release?


### What is the hardware spec (RAM, OS)?


### Steps to reproduce the issue (config used to run Super Graph).


### Expected behaviour and actual result.
.github/ISSUE_TEMPLATE/documentation.md (12 changes)
@@ -1,12 +0,0 @@
---
name: Documentation
about: Suggest how we can improve documentation
title: ''
labels: bug, docs
assignees: ''

---

<!-- If you think the Super Graph documentation falls short https://supergraph.dev/guide.html please suggest ways we can improve it. -->

<!-- explain it here. -->
.github/ISSUE_TEMPLATE/feature_request.md (14 changes)
@@ -1,14 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

<!-- Please only use this template for submitting feature requests -->

**What would you like to be added**:

**Why is this needed**:
.gitignore (14 changes)
@@ -23,20 +23,8 @@
/tmp/runner-build
/demo/tmp

.idea
*.iml
.vscode
main
.DS_Store
.swp
.release
main
super-graph
*-fuzz.zip
crashers
suppressions
release
.gofuzz
*-fuzz.zip
*.test
.firebase
.wtc.yaml (13 changes)
@@ -1,13 +0,0 @@
no_trace: false
debounce: 300 # if rule has no debounce, this will be used instead
ignore: \.git/
trig: [start, run] # will run on start
rules:
  - name: start
  - name: run
    match: \.go$
    ignore: web|examples|docs|_test\.go$
    command: go run main.go serv
  - name: test
    match: _test\.go$
    command: go test -cover {PKG}
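This file configures wtc, the Go file-watcher the Dockerfile below installs from github.com/rafaelsq/wtc. A rough sketch of running it, assuming wtc picks up `.wtc.yaml` from the working directory by default:

```bash
# install the watcher (mirrors the Dockerfile) and start it from the repo root;
# per the rules above it re-runs `go run main.go serv` whenever a .go file changes
GO111MODULE=off go get -u github.com/rafaelsq/wtc
wtc
```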
CHANGELOG.md (394 changes)
@@ -1,394 +0,0 @@
<a name="unreleased"></a>
## [Unreleased]


<a name="v0.13.22"></a>
## [v0.13.22] - 2020-05-01

<a name="v0.13.21"></a>
## [v0.13.21] - 2020-04-24

<a name="v0.13.20"></a>
## [v0.13.20] - 2020-04-24

<a name="v0.13.19"></a>
## [v0.13.19] - 2020-04-23

<a name="v0.13.18"></a>
## [v0.13.18] - 2020-04-23

<a name="v0.13.17"></a>
## [v0.13.17] - 2020-04-22

<a name="v0.13.16"></a>
## [v0.13.16] - 2020-04-21
### Features
- feat : improve the generated introspection schema and avoid the chirino/graphql api leaking through the core api. ([#53](https://github.com/dosco/super-graph/issues/53))


<a name="v0.13.15"></a>
## [v0.13.15] - 2020-04-20

<a name="v0.13.14"></a>
## [v0.13.14] - 2020-04-19

<a name="v0.13.13"></a>
## [v0.13.13] - 2020-04-19

<a name="v0.13.12"></a>
## [v0.13.12] - 2020-04-19

<a name="v0.13.11"></a>
## [v0.13.11] - 2020-04-18

<a name="v0.13.10"></a>
## [v0.13.10] - 2020-04-17

<a name="v0.13.9"></a>
## [v0.13.9] - 2020-04-16

<a name="v0.13.8"></a>
## [v0.13.8] - 2020-04-16

<a name="v0.13.7"></a>
## [v0.13.7] - 2020-04-16

<a name="v0.13.6"></a>
## [v0.13.6] - 2020-04-13

<a name="v0.13.5"></a>
## [v0.13.5] - 2020-04-13

<a name="v0.13.4"></a>
## [v0.13.4] - 2020-04-12

<a name="v0.13.3"></a>
## [v0.13.3] - 2020-04-12

<a name="v0.13.2"></a>
## [v0.13.2] - 2020-04-11

<a name="v0.13.1"></a>
## [v0.13.1] - 2020-04-11

<a name="v0.13.0"></a>
## [v0.13.0] - 2020-04-10

<a name="v0.12.49"></a>
## [v0.12.49] - 2020-04-01

<a name="v0.12.48"></a>
## [v0.12.48] - 2020-03-31

<a name="v0.12.47"></a>
## [v0.12.47] - 2020-03-30

<a name="v0.12.46"></a>
## [v0.12.46] - 2020-03-21

<a name="v0.12.45"></a>
## [v0.12.45] - 2020-03-18

<a name="v0.12.44"></a>
## [v0.12.44] - 2020-03-16

<a name="v0.12.43"></a>
## [v0.12.43] - 2020-03-16

<a name="v0.12.42"></a>
## [v0.12.42] - 2020-03-14

<a name="v0.12.41"></a>
## [v0.12.41] - 2020-03-06

<a name="v0.12.40"></a>
## [v0.12.40] - 2020-03-06

<a name="v0.12.39"></a>
## [v0.12.39] - 2020-03-06

<a name="v0.12.38"></a>
## [v0.12.38] - 2020-03-05

<a name="v0.12.37"></a>
## [v0.12.37] - 2020-03-04

<a name="v0.12.36"></a>
## [v0.12.36] - 2020-03-04

<a name="v0.12.35"></a>
## [v0.12.35] - 2020-03-03

<a name="v0.12.34"></a>
## [v0.12.34] - 2020-03-03

<a name="v0.12.33"></a>
## [v0.12.33] - 2020-02-29

<a name="v0.12.32"></a>
## [v0.12.32] - 2020-02-24
### Bug Fixes
- fix "Try the demo app" in docs ([#38](https://github.com/dosco/super-graph/issues/38))


<a name="v0.12.31"></a>
## [v0.12.31] - 2020-02-23

<a name="v0.12.30"></a>
## [v0.12.30] - 2020-02-23

<a name="v0.12.29"></a>
## [v0.12.29] - 2020-02-21

<a name="v0.12.28"></a>
## [v0.12.28] - 2020-02-20

<a name="v0.12.27"></a>
## [v0.12.27] - 2020-02-19

<a name="v0.12.26"></a>
## [v0.12.26] - 2020-02-11

<a name="v0.12.25"></a>
## [v0.12.25] - 2020-02-10

<a name="v0.12.24"></a>
## [v0.12.24] - 2020-02-03

<a name="v0.12.23"></a>
## [v0.12.23] - 2020-02-02

<a name="v0.12.22"></a>
## [v0.12.22] - 2020-02-01

<a name="v0.12.21"></a>
## [v0.12.21] - 2020-01-31

<a name="v0.12.20"></a>
## [v0.12.20] - 2020-01-28

<a name="v0.12.19"></a>
## [v0.12.19] - 2020-01-26

<a name="v0.12.18"></a>
## [v0.12.18] - 2020-01-20

<a name="v0.12.17"></a>
## [v0.12.17] - 2020-01-20

<a name="v0.12.16"></a>
## [v0.12.16] - 2020-01-19

<a name="v0.12.15"></a>
## [v0.12.15] - 2020-01-17

<a name="v0.12.14"></a>
## [v0.12.14] - 2020-01-17

<a name="v0.12.13"></a>
## [v0.12.13] - 2020-01-16

<a name="v0.12.12"></a>
## [v0.12.12] - 2020-01-15

<a name="v0.12.11"></a>
## [v0.12.11] - 2020-01-14

<a name="v0.12.10"></a>
## [v0.12.10] - 2020-01-14

<a name="v0.12.9"></a>
## [v0.12.9] - 2020-01-14

<a name="v0.12.8"></a>
## [v0.12.8] - 2020-01-13

<a name="v0.12.7"></a>
## [v0.12.7] - 2020-01-11
### Pull Requests
- Merge pull request [#22](https://github.com/dosco/super-graph/issues/22) from bhaskarmurthy/fix-grammer-syntax


<a name="v0.12.6"></a>
## [v0.12.6] - 2019-12-02

<a name="v0.12.5"></a>
## [v0.12.5] - 2019-11-30

<a name="v0.12.4"></a>
## [v0.12.4] - 2019-11-28

<a name="v0.12.3"></a>
## [v0.12.3] - 2019-11-26

<a name="v0.12.2"></a>
## [v0.12.2] - 2019-11-25

<a name="v0.12.1"></a>
## [v0.12.1] - 2019-11-22

<a name="v0.12.0"></a>
## [v0.12.0] - 2019-11-22

<a name="v0.11.9"></a>
## [v0.11.9] - 2019-11-22

<a name="v0.11.8"></a>
## [v0.11.8] - 2019-11-21

<a name="v0.11.7"></a>
## [v0.11.7] - 2019-11-19

<a name="v0.11.6"></a>
## [v0.11.6] - 2019-11-15

<a name="v0.11.5"></a>
## [v0.11.5] - 2019-11-10

<a name="v0.11.4"></a>
## [v0.11.4] - 2019-11-10

<a name="v0.11.3"></a>
## [v0.11.3] - 2019-11-09

<a name="v0.11.2"></a>
## [v0.11.2] - 2019-11-07

<a name="v0.11.1"></a>
## [v0.11.1] - 2019-11-05

<a name="v0.11"></a>
## [v0.11] - 2019-11-01
### Pull Requests
- Merge pull request [#11](https://github.com/dosco/super-graph/issues/11) from dosco/rbac


<a name="v0.10.1"></a>
## [v0.10.1] - 2019-10-06
### Pull Requests
- Merge pull request [#10](https://github.com/dosco/super-graph/issues/10) from FourSigma/sm-examples-folder


<a name="v0.10"></a>
## [v0.10] - 2019-10-04
### Pull Requests
- Merge pull request [#6](https://github.com/dosco/super-graph/issues/6) from muesli/typo-fixes


<a name="v0.9"></a>
## [v0.9] - 2019-10-01

<a name="v0.8"></a>
## [v0.8] - 2019-09-30

<a name="v0.7"></a>
## [v0.7] - 2019-09-29

<a name="v0.6"></a>
## [v0.6] - 2019-09-29

<a name="v0.5"></a>
## [v0.5] - 2019-04-10

<a name="v0.4"></a>
## [v0.4] - 2019-04-01

<a name="v0.3"></a>
## [v0.3] - 2019-04-01

<a name="0.3"></a>
## 0.3 - 2019-03-24

[Unreleased]: https://github.com/dosco/super-graph/compare/v0.13.22...HEAD
[v0.13.22]: https://github.com/dosco/super-graph/compare/v0.13.21...v0.13.22
[v0.13.21]: https://github.com/dosco/super-graph/compare/v0.13.20...v0.13.21
[v0.13.20]: https://github.com/dosco/super-graph/compare/v0.13.19...v0.13.20
[v0.13.19]: https://github.com/dosco/super-graph/compare/v0.13.18...v0.13.19
[v0.13.18]: https://github.com/dosco/super-graph/compare/v0.13.17...v0.13.18
[v0.13.17]: https://github.com/dosco/super-graph/compare/v0.13.16...v0.13.17
[v0.13.16]: https://github.com/dosco/super-graph/compare/v0.13.15...v0.13.16
[v0.13.15]: https://github.com/dosco/super-graph/compare/v0.13.14...v0.13.15
[v0.13.14]: https://github.com/dosco/super-graph/compare/v0.13.13...v0.13.14
[v0.13.13]: https://github.com/dosco/super-graph/compare/v0.13.12...v0.13.13
[v0.13.12]: https://github.com/dosco/super-graph/compare/v0.13.11...v0.13.12
[v0.13.11]: https://github.com/dosco/super-graph/compare/v0.13.10...v0.13.11
[v0.13.10]: https://github.com/dosco/super-graph/compare/v0.13.9...v0.13.10
[v0.13.9]: https://github.com/dosco/super-graph/compare/v0.13.8...v0.13.9
[v0.13.8]: https://github.com/dosco/super-graph/compare/v0.13.7...v0.13.8
[v0.13.7]: https://github.com/dosco/super-graph/compare/v0.13.6...v0.13.7
[v0.13.6]: https://github.com/dosco/super-graph/compare/v0.13.5...v0.13.6
[v0.13.5]: https://github.com/dosco/super-graph/compare/v0.13.4...v0.13.5
[v0.13.4]: https://github.com/dosco/super-graph/compare/v0.13.3...v0.13.4
[v0.13.3]: https://github.com/dosco/super-graph/compare/v0.13.2...v0.13.3
[v0.13.2]: https://github.com/dosco/super-graph/compare/v0.13.1...v0.13.2
[v0.13.1]: https://github.com/dosco/super-graph/compare/v0.13.0...v0.13.1
[v0.13.0]: https://github.com/dosco/super-graph/compare/v0.12.49...v0.13.0
[v0.12.49]: https://github.com/dosco/super-graph/compare/v0.12.48...v0.12.49
[v0.12.48]: https://github.com/dosco/super-graph/compare/v0.12.47...v0.12.48
[v0.12.47]: https://github.com/dosco/super-graph/compare/v0.12.46...v0.12.47
[v0.12.46]: https://github.com/dosco/super-graph/compare/v0.12.45...v0.12.46
[v0.12.45]: https://github.com/dosco/super-graph/compare/v0.12.44...v0.12.45
[v0.12.44]: https://github.com/dosco/super-graph/compare/v0.12.43...v0.12.44
[v0.12.43]: https://github.com/dosco/super-graph/compare/v0.12.42...v0.12.43
[v0.12.42]: https://github.com/dosco/super-graph/compare/v0.12.41...v0.12.42
[v0.12.41]: https://github.com/dosco/super-graph/compare/v0.12.40...v0.12.41
[v0.12.40]: https://github.com/dosco/super-graph/compare/v0.12.39...v0.12.40
[v0.12.39]: https://github.com/dosco/super-graph/compare/v0.12.38...v0.12.39
[v0.12.38]: https://github.com/dosco/super-graph/compare/v0.12.37...v0.12.38
[v0.12.37]: https://github.com/dosco/super-graph/compare/v0.12.36...v0.12.37
[v0.12.36]: https://github.com/dosco/super-graph/compare/v0.12.35...v0.12.36
[v0.12.35]: https://github.com/dosco/super-graph/compare/v0.12.34...v0.12.35
[v0.12.34]: https://github.com/dosco/super-graph/compare/v0.12.33...v0.12.34
[v0.12.33]: https://github.com/dosco/super-graph/compare/v0.12.32...v0.12.33
[v0.12.32]: https://github.com/dosco/super-graph/compare/v0.12.31...v0.12.32
[v0.12.31]: https://github.com/dosco/super-graph/compare/v0.12.30...v0.12.31
[v0.12.30]: https://github.com/dosco/super-graph/compare/v0.12.29...v0.12.30
[v0.12.29]: https://github.com/dosco/super-graph/compare/v0.12.28...v0.12.29
[v0.12.28]: https://github.com/dosco/super-graph/compare/v0.12.27...v0.12.28
[v0.12.27]: https://github.com/dosco/super-graph/compare/v0.12.26...v0.12.27
[v0.12.26]: https://github.com/dosco/super-graph/compare/v0.12.25...v0.12.26
[v0.12.25]: https://github.com/dosco/super-graph/compare/v0.12.24...v0.12.25
[v0.12.24]: https://github.com/dosco/super-graph/compare/v0.12.23...v0.12.24
[v0.12.23]: https://github.com/dosco/super-graph/compare/v0.12.22...v0.12.23
[v0.12.22]: https://github.com/dosco/super-graph/compare/v0.12.21...v0.12.22
[v0.12.21]: https://github.com/dosco/super-graph/compare/v0.12.20...v0.12.21
[v0.12.20]: https://github.com/dosco/super-graph/compare/v0.12.19...v0.12.20
[v0.12.19]: https://github.com/dosco/super-graph/compare/v0.12.18...v0.12.19
[v0.12.18]: https://github.com/dosco/super-graph/compare/v0.12.17...v0.12.18
[v0.12.17]: https://github.com/dosco/super-graph/compare/v0.12.16...v0.12.17
[v0.12.16]: https://github.com/dosco/super-graph/compare/v0.12.15...v0.12.16
[v0.12.15]: https://github.com/dosco/super-graph/compare/v0.12.14...v0.12.15
[v0.12.14]: https://github.com/dosco/super-graph/compare/v0.12.13...v0.12.14
[v0.12.13]: https://github.com/dosco/super-graph/compare/v0.12.12...v0.12.13
[v0.12.12]: https://github.com/dosco/super-graph/compare/v0.12.11...v0.12.12
[v0.12.11]: https://github.com/dosco/super-graph/compare/v0.12.10...v0.12.11
[v0.12.10]: https://github.com/dosco/super-graph/compare/v0.12.9...v0.12.10
[v0.12.9]: https://github.com/dosco/super-graph/compare/v0.12.8...v0.12.9
[v0.12.8]: https://github.com/dosco/super-graph/compare/v0.12.7...v0.12.8
[v0.12.7]: https://github.com/dosco/super-graph/compare/v0.12.6...v0.12.7
[v0.12.6]: https://github.com/dosco/super-graph/compare/v0.12.5...v0.12.6
[v0.12.5]: https://github.com/dosco/super-graph/compare/v0.12.4...v0.12.5
[v0.12.4]: https://github.com/dosco/super-graph/compare/v0.12.3...v0.12.4
[v0.12.3]: https://github.com/dosco/super-graph/compare/v0.12.2...v0.12.3
[v0.12.2]: https://github.com/dosco/super-graph/compare/v0.12.1...v0.12.2
[v0.12.1]: https://github.com/dosco/super-graph/compare/v0.12.0...v0.12.1
[v0.12.0]: https://github.com/dosco/super-graph/compare/v0.11.9...v0.12.0
[v0.11.9]: https://github.com/dosco/super-graph/compare/v0.11.8...v0.11.9
[v0.11.8]: https://github.com/dosco/super-graph/compare/v0.11.7...v0.11.8
[v0.11.7]: https://github.com/dosco/super-graph/compare/v0.11.6...v0.11.7
[v0.11.6]: https://github.com/dosco/super-graph/compare/v0.11.5...v0.11.6
[v0.11.5]: https://github.com/dosco/super-graph/compare/v0.11.4...v0.11.5
[v0.11.4]: https://github.com/dosco/super-graph/compare/v0.11.3...v0.11.4
[v0.11.3]: https://github.com/dosco/super-graph/compare/v0.11.2...v0.11.3
[v0.11.2]: https://github.com/dosco/super-graph/compare/v0.11.1...v0.11.2
[v0.11.1]: https://github.com/dosco/super-graph/compare/v0.11...v0.11.1
[v0.11]: https://github.com/dosco/super-graph/compare/v0.10.1...v0.11
[v0.10.1]: https://github.com/dosco/super-graph/compare/v0.10...v0.10.1
[v0.10]: https://github.com/dosco/super-graph/compare/v0.9...v0.10
[v0.9]: https://github.com/dosco/super-graph/compare/v0.8...v0.9
[v0.8]: https://github.com/dosco/super-graph/compare/v0.7...v0.8
[v0.7]: https://github.com/dosco/super-graph/compare/v0.6...v0.7
[v0.6]: https://github.com/dosco/super-graph/compare/v0.5...v0.6
[v0.5]: https://github.com/dosco/super-graph/compare/v0.4...v0.5
[v0.4]: https://github.com/dosco/super-graph/compare/v0.3...v0.4
[v0.3]: https://github.com/dosco/super-graph/compare/0.3...v0.3
@@ -1,3 +0,0 @@
# Code of Conduct

Be excellent to each other. Treat others the way you'd like to be treated. We are all here to learn, build great software and make new friends.
@@ -1,82 +0,0 @@
# Contributing to Super Graph

Super Graph is a very approachable codebase and a project that is easy for almost
anyone with basic Go knowledge to start contributing to. It is also a young project,
so a lot of high-value work is there for the taking.

Even the GraphQL to SQL compiler that is at the heart of Super Graph is essentially a textbook compiler with clean and easy-to-read code. The data structures used by the lexer, parser and SQL generator are easy to understand and modify.

Finally, we have a lot of tests for critical parts of the codebase, which makes it easy for you to make changes with confidence. I'm always available for questions or any sort of guidance, so feel free to reach out over Twitter or Discord.

* [Getting Started](#getting-started)
* [Setting Up the Development Environment](#setup-development-environment)
  * [Prerequisites](#prerequisites)
  * [Get the Super Graph source](#get-source-code)
  * [Start the development environment](#start-the-development-environment)
  * [Testing](#testing-and-linting)
* [Contributing](#contributing)
  * [Guidelines](#guidelines)
  * [Code style](#code-style)

## Getting Started

- Read the [Getting Started Guide](https://supergraph.dev/guide.html#get-started)

## Setup Development Environment

### Prerequisites

- Install [Git](https://git-scm.com/) (may be already installed on your system, or available through your OS package manager)
- Install [Go 1.13 or above](https://golang.org/doc/install)
- Install [Docker](https://docs.docker.com/v17.09/engine/installation/)

### Get source code

The entire build flow uses the `Makefile`; there is a whole list of sub-commands you
can use to build, test, install, lint, etc.

```bash
git clone https://github.com/dosco/super-graph
cd ./super-graph
make help
```

### Start the development environment

The entire development flow is packaged into a `docker-compose` workflow. The `up` command below will launch a Postgres database, an example e-commerce app in Rails, and Super Graph in development mode. The `db:seed` Rails task will insert sample data into Postgres.

```bash
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
docker-compose up
```

### Learn how the code works

[Super Graph codebase explained](https://supergraph.dev/internals.html)

### Testing and Linting

```bash
make lint test
```

## Contributing

### Guidelines

- **Pull requests are welcome**, as long as you're willing to put in the effort to meet the guidelines.
- Aim for clear, well-written, maintainable code.
- Take a simple and minimal approach to features, like Go.
- Refactoring existing code now for better performance, better readability or better testability wins over adding a new feature.
- Don't add a function to a module that you don't use right now, or that doesn't clearly enable planned functionality.
- Don't ship a half-done feature, which would require significant alterations to work fully.
- Avoid [technical debt](https://en.wikipedia.org/wiki/Technical_debt) like cancer.
- Leave the code cleaner than when you began.

### Code style

- We're following [Go Code Review](https://github.com/golang/go/wiki/CodeReviewComments).
- Use `go fmt` to format your code before committing.
- If you see *any code* which clearly violates the style guide, please fix it and send a pull request. No need to ask for permission.
- Avoid unnecessary vertical spaces. Use your judgment or follow the code review comments.
- Wrap your code and comments to 100 characters, unless doing so makes the code less legible.
Dockerfile (42 changes)
@@ -1,38 +1,33 @@
# stage: 1
FROM node:10 as react-build
WORKDIR /web
COPY /internal/serv/web/ ./
COPY web/ ./
RUN yarn
RUN yarn build

# stage: 2
FROM golang:1.14-alpine as go-build
FROM golang:1.13beta1-alpine as go-build
RUN apk update && \
    apk add --no-cache make && \
    apk add --no-cache git && \
    apk add --no-cache jq
    apk add --no-cache upx=3.95-r2

RUN GO111MODULE=off go get -u github.com/rafaelsq/wtc

ARG SOPS_VERSION=3.5.0
ADD https://github.com/mozilla/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.linux /usr/local/bin/sops
RUN chmod 755 /usr/local/bin/sops
RUN go get -u github.com/shanzi/wu && \
    go install github.com/shanzi/wu && \
    go get github.com/GeertJohan/go.rice/rice

WORKDIR /app
COPY . /app

RUN mkdir -p /app/internal/serv/web/build
COPY --from=react-build /web/build/ ./internal/serv/web/build
RUN mkdir -p /app/web/build
COPY --from=react-build /web/build/ ./web/build/

ENV GO111MODULE=on
RUN go mod vendor
RUN make build
# RUN echo "Compressing binary, will take a bit of time..." && \
#   upx --ultra-brute -qq super-graph && \
#   upx -t super-graph

RUN go generate ./... && \
    CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o super-graph && \
    echo "Compressing binary, will take a bit of time..." && \
    upx --ultra-brute -qq super-graph && \
    upx -t super-graph

# stage: 3
FROM alpine:latest
@@ -44,15 +39,10 @@ RUN mkdir -p /config
COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=go-build /app/config/* /config/
COPY --from=go-build /app/super-graph .
COPY --from=go-build /app/internal/scripts/start.sh .
COPY --from=go-build /usr/local/bin/sops .

RUN chmod +x /super-graph
RUN chmod +x /start.sh

USER nobody

ENV GO_ENV production
EXPOSE 8080

ENTRYPOINT ["./start.sh"]
CMD ["./super-graph", "serv"]
CMD ./super-graph serv
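For reference, a minimal sketch of building and running the image this Dockerfile produces; the `super-graph` image tag is an assumption, while port 8080 and the `start.sh` entrypoint come from the Dockerfile above:

```bash
docker build -t super-graph .
docker run --rm -p 8080:8080 super-graph
```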
LICENSE (189 changes)
@@ -1,176 +1,21 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
The MIT License (MIT)

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Copyright (c) 2019-present Vikram Rangnekar. twitter.com/dosco

1. Definitions.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Makefile (108 changes)
@@ -1,108 +0,0 @@
BUILD ?= $(shell git rev-parse --short HEAD)
BUILD_DATE ?= $(shell git log -1 --format=%ci)
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
BUILD_VERSION ?= $(shell git describe --always --tags)

GOPATH ?= $(shell go env GOPATH)

ifndef GOPATH
override GOPATH = $(HOME)/go
endif

export GO111MODULE := on

# Build-time Go variables
version = github.com/dosco/super-graph/internal/serv.version
gitBranch = github.com/dosco/super-graph/internal/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/internal/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/internal/serv.lastCommitTime

BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'

.PHONY: all build gen clean test run lint changelog release version help $(PLATFORMS)

test:
	@go test -v -short -race ./...

BIN_DIR := $(GOPATH)/bin
GORICE := $(BIN_DIR)/rice
GOLANGCILINT := $(BIN_DIR)/golangci-lint
GITCHGLOG := $(BIN_DIR)/git-chglog
WEB_BUILD_DIR := ./internal/serv/web/build/manifest.json

$(GORICE):
	@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice

$(WEB_BUILD_DIR):
	@echo "First install Yarn and create a build of the web UI then re-run make install"
	@echo "Run this command: yarn --cwd internal/serv/web/ build"
	@exit 1

$(GITCHGLOG):
	@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog

changelog: $(GITCHGLOG)
	@git-chglog $(ARGS)

$(GOLANGCILINT):
	@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.25.1

lint: $(GOLANGCILINT)
	@golangci-lint run ./... --skip-dirs-use-default

BINARY := super-graph
LDFLAGS := -s -w
PLATFORMS := windows linux darwin
os = $(word 1, $@)

$(PLATFORMS): lint test
	@mkdir -p release
	@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64 main.go

release: windows linux darwin

all: lint test $(BINARY)

build: $(BINARY)

gen: $(GORICE) $(WEB_BUILD_DIR)
	@go generate ./...

$(BINARY): clean
	@go build $(BUILD_FLAGS) -o $(BINARY) main.go

clean:
	@rm -f $(BINARY)

run: clean
	@go run $(BUILD_FLAGS) main.go $(ARGS)

install: clean build
	@echo "Commit Hash: `git rev-parse HEAD`"
	@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
	@mv $(BINARY) $(GOPATH)/bin/$(BINARY)
	@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`

uninstall: clean
	@go clean -i -x

version:
	@echo Super Graph ${BUILD_VERSION}
	@echo Build: ${BUILD}
	@echo Build date: ${BUILD_DATE}
	@echo Branch: ${BUILD_BRANCH}
	@echo Go version: $(shell go version)

help:
	@echo
	@echo Build commands:
	@echo " make build      - Build supergraph binary"
	@echo " make install    - Install supergraph binary"
	@echo " make uninstall  - Uninstall supergraph binary"
	@echo " make [platform] - Build for platform [linux|darwin|windows]"
	@echo " make release    - Build all platforms"
	@echo " make run        - Run supergraph (eg. make run ARGS=\"help\")"
	@echo " make test       - Run all tests"
	@echo " make changelog  - Generate changelog (eg. make changelog ARGS=\"help\")"
	@echo " make help       - This help"
	@echo
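The `-X` linker flags in `BUILD_FLAGS` only take effect if matching package-level string variables exist in `internal/serv`; a minimal sketch of what that side is assumed to look like (the file name and any surrounding code are hypothetical):

```go
// Hypothetical internal/serv/version.go: the -X importpath.name=value flags
// in BUILD_FLAGS overwrite these package-level strings at link time.
package serv

var (
	version        string // set by -X "github.com/dosco/super-graph/internal/serv.version=..."
	gitBranch      string // set by -X github.com/dosco/super-graph/internal/serv.gitBranch=...
	lastCommitSHA  string // set by -X github.com/dosco/super-graph/internal/serv.lastCommitSHA=...
	lastCommitTime string // set by -X "github.com/dosco/super-graph/internal/serv.lastCommitTime=..."
)
```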
NOTICE (13 changes)
@@ -1,13 +0,0 @@
Copyright 2019 Vikram Rangnekar

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
README.md (121 changes)
@@ -1,114 +1,53 @@
<img src="docs/website/static/img/super-graph-logo.svg" width="80" />
<a href="https://supergraph.dev"><img src="https://supergraph.dev/logo.svg" width="100" height="100" align="right" /></a>

# Super Graph - Fetch data without code!
# Super Graph - Instant GraphQL API for Rails

[GoDoc](https://pkg.go.dev/github.com/dosco/super-graph/core?tab=doc)
[Discord Chat](https://discord.gg/6pSWCTZ)

Super Graph gives you a high performance GraphQL API without you having to write any code. GraphQL is automagically compiled into an efficient SQL query. Use it either as a library or a standalone service.
Get a high-performance GraphQL API for your Rails app in seconds without writing a line of code. Super Graph will auto-learn your database structure and relationships. Built-in support for Rails authentication and JWT tokens.

## Using it as a service

```console
go get github.com/dosco/super-graph
super-graph new <app_name>
```

## Why I built Super Graph?

## Using it in your own code

Honestly, because it was more fun than my real work. After working on several products through my career I found myself hating building CRUD APIs (Create, Update, Delete, List, Show). It was always the same thing: figure out what the UI needs, then build an endpoint for it; if related data is needed, join with another table. I didn't want to write that code anymore; I wanted the computer to just do it.

```console
go get github.com/dosco/super-graph/core
```

I always liked GraphQL; it sounded friendly, but it still required me to write all the same database query code. Sure, the API was nicer, but it took a lot of work, sometimes even more than a simple REST API would have. I wanted a GraphQL server that just worked the second you deployed it, without having to write a line of code.

```golang
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	query := `
		query {
			posts {
				id
				title
			}
		}`

	ctx := context.WithValue(context.Background(), core.UserIDKey, 1)

	res, err := sg.GraphQL(ctx, query, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(res.Data))
}
```

## About Super Graph

After working on several products through my career I found that we spend way too much time on building API backends. Most APIs also need constant updating, and this costs time and money.

It's always the same thing: figure out what the UI needs, then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.

I didn't want to write this code anymore, I wanted the computer to do it. Enter GraphQL; to me it sounded great, but it still required me to write all the same database query code.

Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?

This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, Rails integration, database migrations, and everything else needed for you to build production-ready apps with it.
And so after a lot of coffee and some avocado toasts __Super Graph was born, a GraphQL server that just works, is high performance and easy to deploy__. I hope you find it as useful as I do and there's a lot more coming so hit that :star: to stay in the loop.

## Features

- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute-based access control
- Opaque cursor-based efficient pagination
- Full-text search and aggregations
- Works with Rails database schemas
- Automatically learns schemas and relationships
- Belongs-To, One-To-Many and Many-To-Many table relationships
- Full text search and Aggregations
- Rails Auth supported (Redis, Memcache, Cookie)
- JWT tokens supported (Auth0, etc)
- Join database queries with remote REST APIs
- Also works with existing Ruby-On-Rails apps
- Rails authentication supported (Redis, Memcache, Cookie)
- A simple config file
- High performance Go codebase
- Join with remote REST APIs
- Highly optimized and fast Postgres SQL queries
- Configure with a simple config file
- High performance Go codebase
- Tiny docker image and low memory requirements
- Fuzz tested for security
- Database migrations tool
- Database seeding tool
- Works with Postgres and Yugabyte DB
- OpenCensus Support: Zipkin, Prometheus, X-Ray, Stackdriver

## Watch some talks

[Watch on YouTube](https://youtu.be/TGq9wJAj78I)

## Documentation

[supergraph.dev](https://supergraph.dev)

## Reach out
## Contact me

We're happy to help you leverage Super Graph; reach out if you have questions.

[twitter/dosco](https://twitter.com/dosco)

[chat/super-graph](https://discord.gg/6pSWCTZ)
[twitter.com/dosco](https://twitter.com/dosco)

## License

[Apache Public License 2.0](https://opensource.org/licenses/Apache-2.0)
[MIT](http://opensource.org/licenses/MIT)

Copyright (c) 2019-present Vikram Rangnekar
@@ -1,127 +1,21 @@
# http://localhost:8080/

variables {
  "data": [
    {
      "name": "Protect Ya Neck",
      "created_at": "now",
      "updated_at": "now"
    },
    {
      "name": "Enter the Wu-Tang",
      "created_at": "now",
      "updated_at": "now"
    }
  ]
}

mutation {
  products(insert: $data) {
    id
    name
  }
}

variables {
  "update": {
    "name": "Wu-Tang",
    "description": "No description needed"
    "name": "Hellooooo",
    "description": "World"
  },
  "product_id": 1
  "user": 123
}

mutation {
  products(id: $product_id, update: $update) {
  products(update: $update, where: {id: {eq: 134}}) {
    id
    name
    description
  }
}

query {
  users {
    id
    email
    picture: avatar
    products(limit: 2, where: {price: {gt: 10}}) {
      id
      name
      description
    }
  }
}

variables {
  "data": [
    {
      "name": "Gumbo1",
      "created_at": "now",
      "updated_at": "now"
    },
    {
      "name": "Gumbo2",
      "created_at": "now",
      "updated_at": "now"
    }
  ]
}

mutation {
  products(id: 199, delete: true) {
    id
    name
  }
}

query {
  products {
    id
    name
    user {
      email
    }
  }
}

variables {
  "data": {
    "product_id": 5
  }
}

mutation {
  products(id: $product_id, delete: true) {
    id
    name
  }
}

query {
  products {
    id
    name
    price
    users {
      email
    }
  }
}

variables {
  "data": {
    "email": "gfk@myspace.com",
    "full_name": "Ghostface Killah",
    "created_at": "now",
    "updated_at": "now"
  }
}

mutation {
  user(insert: $data) {
    id
  }
}

variables {
  "update": {
    "name": "Helloo",
@@ -138,618 +32,38 @@ mutation {
  }
}

variables {
  "data": {
    "name": "WOOO",
    "price": 50.5
  }
}

mutation {
  products(insert: $data) {
    id
    name
  }
}

query getProducts {
  products {
    id
    name
    price
    description
  }
}

query {
  deals {
  me {
    id
    name
    price
  }
}

variables {
  "beer": "smoke"
}

query beerSearch {
  products(search: $beer) {
    id
    name
    search_rank
    search_headline_description
  }
}

query {
  user {
    id
    full_name
  }
}

variables {
  "data": {
    "email": "goo1@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now"
    }
  }
}

mutation {
  user(insert: $data) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "email": "goo12@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": [
      {
        "name": "Banana 1",
        "price": 1.1,
        "created_at": "now",
        "updated_at": "now"
      },
      {
        "name": "Banana 2",
        "price": 2.2,
        "created_at": "now",
        "updated_at": "now"
      }
    ]
  }
}

mutation {
  user(insert: $data) {
    id
    full_name
    email
    products {
      id
      name
      price
    }
  }
}


variables {
  "data": {
    "name": "Banana 3",
    "price": 1.1,
  "update": {
    "name": "Hellooooo",
    "description": "World",
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "email": "a2@a.com",
      "full_name": "The Dude",
      "created_at": "now",
      "updated_at": "now"
    }
  }
    "updated_at": "now"
  },
  "user": 123
}

mutation {
  products(insert: $data) {
    id
    name
    price
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "update": {
    "name": "my_name",
    "description": "my_desc"
  }
}

mutation {
  product(id: 15, update: $update, where: {id: {eq: 1}}) {
    id
    name
  }
}

variables {
  "update": {
    "name": "my_name",
    "description": "my_desc"
  }
}

mutation {
  product(update: $update, where: {id: {eq: 1}}) {
    id
    name
  }
}

variables {
  "update": {
    "name": "my_name 2",
    "description": "my_desc 2"
  }
}

mutation {
  product(update: $update, where: {id: {eq: 1}}) {
  products(update: $update, where: {id: {eq: 134}}) {
    id
    name
    description
  }
}

variables {
  "data": {
    "sale_type": "tuutuu",
    "quantity": 5,
    "due_date": "now",
    "customer": {
      "email": "thedude1@rug.com",
      "full_name": "The Dude"
    },
    "product": {
      "name": "Apple",
      "price": 1.25
    }
  }
}

mutation {
  purchase(update: $data, id: 5) {
    sale_type
    quantity
    due_date
    customer {
      id
      full_name
      email
    }
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "email": "thedude@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "where": {
        "id": 2
      },
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now"
    }
  }
}

mutation {
  user(update: $data, where: {id: {eq: 8}}) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "email": "thedude@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "where": {
        "id": 2
      },
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now"
    }
  }
}

query {
  user(where: {id: {eq: 8}}) {
  me {
    id
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "email": "thedude@rug.com"
    }
  }
}

query {
  user {
    email
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "email": "booboo@demo.com"
    }
  }
}

mutation {
  product(update: $data, id: 6) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "email": "booboo@demo.com"
    }
  }
}

query {
  product(id: 6) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": {
    "email": "thedude123@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "connect": {
        "id": 7
      },
      "disconnect": {
        "id": 8
      }
    }
  }
}

mutation {
  user(update: $data, id: 6) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "connect": {
        "id": 5,
        "email": "test@test.com"
      }
    }
  }
}

mutation {
  product(update: $data, id: 9) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": {
    "email": "thed44ude@rug.com",
    "full_name": "The Dude",
    "created_at": "now",
    "updated_at": "now",
    "product": {
      "connect": {
        "id": 5
      }
    }
  }
}

mutation {
  user(insert: $data) {
    id
    full_name
    email
    product {
      id
      name
      price
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "created_at": "now",
    "updated_at": "now",
    "user": {
      "connect": {
        "id": 5
      }
    }
  }
}

mutation {
  product(insert: $data) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": [
    {
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now",
      "user": {
        "connect": {
          "id": 6
        }
      }
    },
    {
      "name": "Coconut",
      "price": 2.25,
      "created_at": "now",
      "updated_at": "now",
      "user": {
        "connect": {
          "id": 3
        }
      }
    }
  ]
}

mutation {
  products(insert: $data) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": [
    {
      "name": "Apple",
      "price": 1.25,
      "created_at": "now",
      "updated_at": "now"
    },
    {
      "name": "Coconut",
      "price": 2.25,
      "created_at": "now",
      "updated_at": "now"
    }
  ]
}

mutation {
  products(insert: $data) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "user": {
      "connect": {
        "id": 5,
        "email": "test@test.com"
      }
    }
  }
}

mutation {
  product(update: $data, id: 9) {
    id
    name
    user {
      id
      full_name
      email
    }
  }
}

variables {
  "data": {
    "name": "Apple",
    "price": 1.25,
    "user": {
"connect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 9) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
variables {
|
||||
"data": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"user": {
|
||||
"disconnect": {
|
||||
"id": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutation {
|
||||
product(update: $data, id: 2) {
|
||||
id
|
||||
name
|
||||
user_id
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
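Each `variables {...}` block above pairs with the query or mutation that follows it; together they form one allow-list entry. A minimal sketch of executing such a pair through the library API (the `core` package appears later in this diff; the connection string and the insert payload here are illustrative assumptions, not part of the allow list):

package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	// Assumed local database; substitute your own connection string.
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/app_development")
	if err != nil {
		log.Fatal(err)
	}

	sg, err := core.NewSuperGraph(nil, db)
	if err != nil {
		log.Fatal(err)
	}

	// The variables JSON pairs with the $data reference in the mutation,
	// exactly as in the allow.list entries above.
	vars := []byte(`{"data": {"name": "Apple", "price": 1.25}}`)
	query := `mutation { product(insert: $data) { id name } }`

	ctx := context.WithValue(context.Background(), core.UserIDKey, 1)
	res, err := sg.GraphQL(ctx, query, vars)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(res.Data))
}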
215
config/dev.yml
@@ -2,20 +2,18 @@ app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true

# debug, error, warn, info, none
# debug, info, warn, error, fatal, panic
log_level: "debug"

# enable or disable http compression (uses gzip)
http_compress: true

# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# allow list in ./config/allow.list
production: false
# Disable this in development to get a list of
# queries used. When enabled super graph
# will only allow queries from this list
# List saved to ./config/allow.list
use_allow_list: false

# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false
# valid values: always, per_query, never
auth_fail_block: never

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
@@ -30,27 +28,7 @@ reload_on_config_change: true
# seed_file: seed.js

# Path pointing to where the migrations can be found
migrations_path: ./migrations

# Secret key for general encryption operations like
# encrypting the cursor data
secret_key: supercalifajalistics

# CORS: A list of origins a cross-domain request can be executed from.
# If the special * value is present in the list, all origins will be allowed.
# An origin may contain a wildcard (*) to replace 0 or more
# characters (i.e.: http://*.domain.com).
cors_allowed_origins: ["*"]

# Debug Cross Origin Resource Sharing requests
cors_debug: true

# Default API path prefix is /api; you can change it if you like
# api_path: "/data"

# The Cache-Control header can help cache queries if your CDN supports
# cache-control on POST requests (does not work with mutations)
# cache_control: "public, max-age=300, s-maxage=600"
migrations_path: ./config/migrations

# Postgres related environment Variables
# SG_DATABASE_HOST
@@ -68,27 +46,14 @@ cors_debug: true
#   person: people
#   sheep: sheep

# Enable OpenCensus tracing and metrics
# telemetry:
#   debug: true
#   metrics:
#     exporter: "prometheus"
#   tracing:
#     exporter: "zipkin"
#     endpoint: "http://zipkin:9411/api/v2/spans"
#     sample: 0.2
#     include_query: false
#     include_params: false

auth:
  # Can be 'rails' or 'jwt'
  type: rails
  cookie: _app_session

  # Comment this out if you want to disable setting
  # the user_id via a header for testing.
  # Disable in production
  creds_in_header: true
  # the user_id via a header. Good for testing
  header: X-User-ID

  rails:
    # Rails version; this is used for reading the
@@ -103,6 +68,7 @@ auth:
    # password: ""
    # max_idle: 80
    # max_active: 12000

    # In most cases you don't need these
    # salt: "encrypted cookie"
    # sign_salt: "signed encrypted cookie"
@@ -120,124 +86,67 @@ database:
  port: 5432
  dbname: app_development
  user: postgres
  password: postgres
  password: ''

  #schema: "public"
  #pool_size: 10
  #max_retries: 0
  #log_level: "debug"

  # Set session variable "user.id" to the user id
  # Enable this if you need the user id in triggers, etc
  set_user_id: false
  # Define variables here that you want to use in filters
  # sub-queries must be wrapped in ()
  variables:
    account_id: "(select account_id from users where id = $user_id)"

  # database ping timeout is used for db health checking
  ping_timeout: 1m
  # Define defaults for the field keys and values below
  defaults:
    # filter: ["{ user_id: { eq: $user_id } }"]

    # Define additional variables here to be used with filters
    variables:
      admin_account_id: "5"
# Field and table names that you wish to block
blocklist:
  - ar_internal_metadata
  - schema_migrations
  - secret
  - password
  - encrypted
  - token

    # Field and table names that you wish to block
    blocklist:
      - ar_internal_metadata
      - schema_migrations
      - secret
      - password
      - encrypted
      - token
tables:
  - name: users
    # This filter will overwrite defaults.filter
    # filter: ["{ id: { eq: $user_id } }"]

    tables:
      - name: customers
        remotes:
          - name: payments
            id: stripe_id
            url: http://rails_app:3000/stripe/$id
            path: data
            # debug: true
            pass_headers:
              - cookie
            set_headers:
              - name: Host
                value: 0.0.0.0
              # - name: Authorization
              #   value: Bearer <stripe_api_key>
  # - name: products
  #   # Multiple filters are AND'd together
  #   filter: [
  #     "{ price: { gt: 0 } }",
  #     "{ price: { lt: 8 } }"
  #   ]

      - # You can create new fields that have a
        # real db table backing them
        name: me
        table: users
  - name: customers
    # No filter is used for this field, not
    # even defaults.filter
    filter: none

  - name: deals
    table: products
    remotes:
      - name: payments
        id: stripe_id
        url: http://rails_app:3000/stripe/$id
        path: data
        # debug: true
        pass_headers:
          - cookie
        set_headers:
          - name: Host
            value: 0.0.0.0
          # - name: Authorization
          #   value: Bearer <stripe_api_key>

  - name: users
    columns:
      - name: email
        related_to: products.name
  - # You can create new fields that have a
    # real db table backing them
    name: me
    table: users
    filter: ["{ id: { eq: $user_id } }"]

roles_query: "SELECT * FROM users WHERE id = $user_id"

roles:
  - name: anon
    tables:
      - name: products
        query:
          limit: 10
          columns: ["id", "name", "description"]
          aggregation: false

        insert:
          block: false

        update:
          block: false

        delete:
          block: false

      - name: deals
        query:
          limit: 3
          aggregation: false

      - name: purchases
        query:
          limit: 3
          aggregation: false

  - name: user
    tables:
      - name: users
        query:
          filters: ["{ id: { _eq: $user_id } }"]

      - name: products
        query:
          limit: 50
          filters: ["{ user_id: { eq: $user_id } }"]
          disable_functions: false

        insert:
          filters: ["{ user_id: { eq: $user_id } }"]
          presets:
            - user_id: "$user_id"
            - created_at: "now"
            - updated_at: "now"

        update:
          filters: ["{ user_id: { eq: $user_id } }"]
          columns:
            - id
            - name
          presets:
            - updated_at: "now"

        delete:
          block: true

  - name: admin
    match: id = 1000
    tables:
      - name: users
        filters: []
  # - name: posts
  #   filter: ["{ account_id: { _eq: $account_id } }"]
140
config/prod.yml
@@ -1,25 +1,19 @@
# Inherit config from this other config file
# so I only need to overwrite some values
inherits: dev

app_name: "Super Graph Production"
host_port: 0.0.0.0:8080
web_ui: false

# debug, error, warn, info, none
# debug, info, warn, error, fatal, panic, disable
log_level: "info"

# enable or disable http compression (uses gzip)
http_compress: true

# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# allow list in ./config/allow.list
production: true
# Disable this in development to get a list of
# queries used. When enabled super graph
# will only allow queries from this list
# List saved to ./config/allow.list
use_allow_list: true

# Throw a 401 on auth failure for queries that need auth
auth_fail_block: true
# valid values: always, per_query, never
auth_fail_block: always

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
@@ -30,11 +24,7 @@ enable_tracing: true
# seed_file: seed.js

# Path pointing to where the migrations can be found
# migrations_path: ./migrations

# Secret key for general encryption operations like
# encrypting the cursor data
# secret_key: supercalifajalistics
# migrations_path: migrations

# Postgres related environment Variables
# SG_DATABASE_HOST
@@ -48,29 +38,107 @@ enable_tracing: true
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE

# inflections:
#   person: people
#   sheep: sheep

auth:
  # Can be 'rails' or 'jwt'
  type: rails
  cookie: _app_session

  # Comment this out if you want to disable setting
  # the user_id via a header. Good for testing
  header: X-User-ID

  rails:
    # Rails version; this is used for reading the
    # various cookie formats.
    version: 5.2

    # Found in 'Rails.application.config.secret_key_base'
    secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566

    # Remote cookie store. (memcache or redis)
    # url: redis://127.0.0.1:6379
    # password: test
    # max_idle: 80,
    # max_active: 12000,

    # In most cases you don't need these
    # salt: "encrypted cookie"
    # sign_salt: "signed encrypted cookie"
    # auth_salt: "authenticated encrypted cookie"

  # jwt:
  #   provider: auth0
  #   secret: abc335bfcfdb04e50db5bb0a4d67ab9
  #   public_key_file: /secrets/public_key.pem
  #   public_key_type: ecdsa #rsa

database:
  type: postgres
  host: db
  port: 5432
  dbname: app_production
  dbname: {{app_name_slug}}_development
  user: postgres
  password: postgres
  password: ''
  #pool_size: 10
  #max_retries: 0
  #log_level: "debug"
  #log_level: "debug"

  # Set session variable "user.id" to the user id
  # Enable this if you need the user id in triggers, etc
  set_user_id: false
  # Define variables here that you want to use in filters
  # sub-queries must be wrapped in ()
  variables:
    account_id: "(select account_id from users where id = $user_id)"

  # database ping timeout is used for db health checking
  ping_timeout: 5m
  # Enable OpenCensus tracing and metrics
  # telemetry:
  #   debug: false
  #   metrics:
  #     exporter: "prometheus"
  #   tracing:
  #     exporter: "zipkin"
  #     endpoint: "http://zipkin:9411/api/v2/spans"
  #     sample: 0.6
  # Define defaults for the field keys and values below
  defaults:
    filter: ["{ user_id: { eq: $user_id } }"]

  # Field and table names that you wish to block
  blocklist:
    - ar_internal_metadata
    - schema_migrations
    - secret
    - password
    - encrypted
    - token

  tables:
    - name: users
      # This filter will overwrite defaults.filter
      filter: ["{ id: { eq: $user_id } }"]

    - name: products
      # Multiple filters are AND'd together
      filter: [
        "{ price: { gt: 0 } }",
        "{ price: { lt: 8 } }"
      ]

    - name: customers
      # No filter is used for this field, not
      # even defaults.filter
      filter: none

      # remotes:
      #   - name: payments
      #     id: stripe_id
      #     url: http://rails_app:3000/stripe/$id
      #     path: data
      #     # pass_headers:
      #     #   - cookie
      #     #   - host
      #     set_headers:
      #       - name: Authorization
      #         value: Bearer <stripe_api_key>

    - # You can create new fields that have a
      # real db table backing them
      name: me
      table: users
      filter: ["{ id: { eq: $user_id } }"]

    # - name: posts
    #   filter: ["{ account_id: { _eq: $account_id } }"]
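Since prod.yml declares `inherits: dev`, any value not overridden here falls back to config/dev.yml; the merge is performed by `ReadInConfig`, defined later in this diff in core/config.go. A short sketch of loading the merged config (the path is an assumption based on the layout above):

package main

import (
	"fmt"
	"log"

	"github.com/dosco/super-graph/core"
)

func main() {
	// ReadInConfig resolves the `inherits: dev` directive by reading
	// config/dev.yml first and then merging prod.yml over it.
	c, err := core.ReadInConfig("./config/prod.yml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.UseAllowList) // overridden to true by prod.yml
}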
@@ -11,7 +11,7 @@ for (i = 0; i < user_count; i++) {
  var pwd = fake.password()
  var data = {
    full_name: fake.name(),
    avatar: fake.avatar_url(200),
    avatar: fake.image_url(),
    phone: fake.phone(),
    email: fake.email(),
    password: pwd,
@@ -46,10 +46,10 @@ for (i = 0; i < product_count; i++) {
  var data = {
    name: fake.beer_name(),
    description: desc,
    price: fake.price()
    //user_id: user.id,
    //created_at: "now",
    //updated_at: "now"
    price: fake.price(),
    user_id: user.id,
    created_at: "now",
    updated_at: "now"
  }

  var res = graphql(" \
@@ -57,9 +57,7 @@ for (i = 0; i < product_count; i++) {
  product(insert: $data) { \
    id \
  } \
  }", { data: data }, {
    user_id: 5
  })
  }", { data: data })
  products.push(res.product)
}
228
core/api.go
@@ -1,228 +0,0 @@
// Package core provides the primary API to include and use Super Graph with your own code.
// For detailed documentation visit https://supergraph.dev
//
// Example usage:
/*
	package main

	import (
		"context"
		"database/sql"
		"fmt"
		"log"

		"github.com/dosco/super-graph/core"
		_ "github.com/jackc/pgx/v4/stdlib"
	)

	func main() {
		db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/example_db")
		if err != nil {
			log.Fatal(err)
		}

		sg, err := core.NewSuperGraph(nil, db)
		if err != nil {
			log.Fatal(err)
		}

		query := `
			query {
				posts {
					id
					title
				}
			}`

		ctx := context.WithValue(context.Background(), core.UserIDKey, 1)

		res, err := sg.GraphQL(ctx, query, nil)
		if err != nil {
			log.Fatal(err)
		}

		fmt.Println(string(res.Data))
	}
*/
package core

import (
	"context"
	"crypto/sha256"
	"database/sql"
	"encoding/json"
	"hash/maphash"
	_log "log"
	"os"

	"github.com/chirino/graphql"
	"github.com/dosco/super-graph/core/internal/allow"
	"github.com/dosco/super-graph/core/internal/crypto"
	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/core/internal/qcode"
)

type contextkey int

// Constants to set values on the context passed to the NewSuperGraph function
const (
	// Name of the authentication provider. Eg. google, github, etc
	UserIDProviderKey contextkey = iota

	// User ID value for authenticated users
	UserIDKey

	// User role if pre-defined
	UserRoleKey
)

// SuperGraph struct is an instance of the Super Graph engine. It holds all the required information like
// database schemas, relationships, etc that the GraphQL-to-SQL compiler needs to do its job.
type SuperGraph struct {
	conf        *Config
	db          *sql.DB
	log         *_log.Logger
	dbinfo      *psql.DBInfo
	schema      *psql.DBSchema
	allowList   *allow.List
	encKey      [32]byte
	hashSeed    maphash.Seed
	queries     map[uint64]*query
	roles       map[string]*Role
	getRole     *sql.Stmt
	rmap        map[uint64]resolvFn
	abacEnabled bool
	qc          *qcode.Compiler
	pc          *psql.Compiler
	ge          *graphql.Engine
}

// NewSuperGraph creates the SuperGraph struct; this involves querying the database to learn its
// schemas and relationships
func NewSuperGraph(conf *Config, db *sql.DB) (*SuperGraph, error) {
	return newSuperGraph(conf, db, nil)
}

// newSuperGraph helps with writing tests and benchmarks
func newSuperGraph(conf *Config, db *sql.DB, dbinfo *psql.DBInfo) (*SuperGraph, error) {
	if conf == nil {
		conf = &Config{}
	}

	sg := &SuperGraph{
		conf:     conf,
		db:       db,
		dbinfo:   dbinfo,
		log:      _log.New(os.Stdout, "", 0),
		hashSeed: maphash.MakeSeed(),
	}

	if err := sg.initConfig(); err != nil {
		return nil, err
	}

	if err := sg.initCompilers(); err != nil {
		return nil, err
	}

	if err := sg.initAllowList(); err != nil {
		return nil, err
	}

	if err := sg.initPrepared(); err != nil {
		return nil, err
	}

	if err := sg.initResolvers(); err != nil {
		return nil, err
	}

	if err := sg.initGraphQLEgine(); err != nil {
		return nil, err
	}

	if conf.SecretKey != "" {
		sk := sha256.Sum256([]byte(conf.SecretKey))
		conf.SecretKey = ""
		sg.encKey = sk
	} else {
		sg.encKey = crypto.NewEncryptionKey()
	}

	return sg, nil
}

// Result struct contains the output of the GraphQL function; this includes the resulting JSON
// from the database query and any error information
type Result struct {
	op   qcode.QType
	name string
	sql  string
	role string

	Error      string          `json:"message,omitempty"`
	Data       json.RawMessage `json:"data,omitempty"`
	Extensions *extensions     `json:"extensions,omitempty"`
}

// GraphQL function is called on the SuperGraph struct to convert the provided GraphQL query into an
// SQL query and execute it on the database. In production mode prepared statements are used directly
// and no query compiling takes place.
//
// In developer mode all named queries are saved into a file `allow.list` and in production mode only
// queries from this file can be run.
func (sg *SuperGraph) GraphQL(c context.Context, query string, vars json.RawMessage) (*Result, error) {
	var res Result

	res.op = qcode.GetQType(query)
	res.name = allow.QueryName(query)

	// use the chirino/graphql library for introspection queries
	// disabled when allow list is enforced
	if !sg.conf.UseAllowList && res.name == "IntrospectionQuery" {
		r := sg.ge.ServeGraphQL(&graphql.Request{Query: query})
		res.Data = r.Data

		if r.Error() != nil {
			res.Error = r.Error().Error()
		}
		return &res, r.Error()
	}

	ct := scontext{Context: c, sg: sg, query: query, vars: vars, res: res}

	if len(vars) <= 2 {
		ct.vars = nil
	}

	if keyExists(c, UserIDKey) {
		ct.role = "user"
	} else {
		ct.role = "anon"
	}

	data, err := ct.execQuery()
	if err != nil {
		return &ct.res, err
	}

	ct.res.Data = json.RawMessage(data)

	return &ct.res, nil
}

// GraphQLSchema function returns the GraphQL schema for the underlying database connected
// to this instance of Super Graph
func (sg *SuperGraph) GraphQLSchema() (string, error) {
	return sg.ge.Schema.String(), nil
}

// Operation function returns the operation type from the query. It uses a very fast algorithm to
// extract the operation without having to parse the query.
func Operation(query string) OpType {
	return OpType(qcode.GetQType(query))
}

// Name function returns the operation name from the query. It uses a very fast algorithm to
// extract the operation name without having to parse the query.
func Name(query string) string {
	return allow.QueryName(query)
}
@@ -1,62 +0,0 @@
package core

import (
	"context"
	"fmt"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/dosco/super-graph/core/internal/psql"
)

func BenchmarkGraphQL(b *testing.B) {
	ct := context.WithValue(context.Background(), UserIDKey, "1")

	db, _, err := sqlmock.New()
	if err != nil {
		b.Fatal(err)
	}
	defer db.Close()

	// mock.ExpectQuery(`^SELECT jsonb_build_object`).WithArgs()

	c := &Config{}
	sg, err := newSuperGraph(c, db, psql.GetTestDBInfo())
	if err != nil {
		b.Fatal(err)
	}

	query := `
	query {
		products {
			id
			name
			user {
				full_name
				phone
				email
			}
			customers {
				id
				email
			}
		}
		users {
			id
			name
		}
	}`

	b.ResetTimer()
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = sg.GraphQL(ct, query, nil)
		}
	})

	fmt.Println(err)

	//fmt.Println(mock.ExpectationsWereMet())
}
96
core/args.go
@@ -1,96 +0,0 @@
package core

import (
	"encoding/json"
	"fmt"

	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/jsn"
)

// argList function is used to create a list of arguments to pass
// to a prepared statement.
func (c *scontext) argList(md psql.Metadata) ([]interface{}, error) {
	params := md.Params()
	vars := make([]interface{}, len(params))

	var fields map[string]json.RawMessage
	var err error

	if len(c.vars) != 0 {
		fields, _, err = jsn.Tree(c.vars)

		if err != nil {
			return nil, err
		}
	}

	for i, p := range params {
		switch p.Name {
		case "user_id":
			if v := c.Value(UserIDKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr(p)
			}

		case "user_id_provider":
			if v := c.Value(UserIDProviderKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr(p)
			}

		case "user_role":
			if v := c.Value(UserRoleKey); v != nil {
				vars[i] = v.(string)
			} else {
				return nil, argErr(p)
			}

		case "cursor":
			if v, ok := fields["cursor"]; ok && v[0] == '"' {
				v1, err := c.sg.decrypt(string(v[1 : len(v)-1]))
				if err != nil {
					return nil, err
				}
				vars[i] = v1
			} else {
				return nil, argErr(p)
			}

		default:
			if v, ok := fields[p.Name]; ok {
				switch {
				case p.IsArray && v[0] != '[':
					return nil, fmt.Errorf("variable '%s' should be an array of type '%s'", p.Name, p.Type)

				case p.Type == "json" && v[0] != '[' && v[0] != '{':
					return nil, fmt.Errorf("variable '%s' should be an array or object", p.Name)
				}

				switch v[0] {
				case '[', '{':
					vars[i] = v

				default:
					var val interface{}
					if err := json.Unmarshal(v, &val); err != nil {
						return nil, err
					}
					vars[i] = val
				}

			} else {
				return nil, argErr(p)
			}
		}
	}

	return vars, nil
}

func argErr(p psql.Param) error {
	return fmt.Errorf("required variable '%s' of type '%s' must be set", p.Name, p.Type)
}
183
core/build.go
@@ -1,183 +0,0 @@
package core

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"

	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/core/internal/qcode"
)

type stmt struct {
	role *Role
	qc   *qcode.QCode
	md   psql.Metadata
	sql  string
}

func (sg *SuperGraph) buildStmt(qt qcode.QType, query, vars []byte, role string) ([]stmt, error) {
	switch qt {
	case qcode.QTMutation:
		return sg.buildRoleStmt(query, vars, role)

	case qcode.QTQuery:
		if role == "anon" {
			return sg.buildRoleStmt(query, vars, "anon")
		}

		if sg.abacEnabled {
			return sg.buildMultiStmt(query, vars)
		}

		return sg.buildRoleStmt(query, vars, "user")

	default:
		return nil, fmt.Errorf("unknown query type '%d'", qt)
	}
}

func (sg *SuperGraph) buildRoleStmt(query, vars []byte, role string) ([]stmt, error) {
	ro, ok := sg.roles[role]
	if !ok {
		return nil, fmt.Errorf(`role '%s' not defined in c.sg.config`, role)
	}

	var vm map[string]json.RawMessage
	var err error

	if len(vars) != 0 {
		if err := json.Unmarshal(vars, &vm); err != nil {
			return nil, err
		}
	}

	qc, err := sg.qc.Compile(query, ro.Name)
	if err != nil {
		return nil, err
	}

	stmts := []stmt{{role: ro, qc: qc}}
	w := &bytes.Buffer{}

	stmts[0].md, err = sg.pc.Compile(w, qc, psql.Variables(vm))
	if err != nil {
		return nil, err
	}

	stmts[0].sql = w.String()

	return stmts, nil
}

func (sg *SuperGraph) buildMultiStmt(query, vars []byte) ([]stmt, error) {
	var vm map[string]json.RawMessage
	var err error

	if len(vars) != 0 {
		if err := json.Unmarshal(vars, &vm); err != nil {
			return nil, err
		}
	}

	if sg.conf.RolesQuery == "" {
		return nil, errors.New("roles_query not defined")
	}

	stmts := make([]stmt, 0, len(sg.conf.Roles))
	w := &bytes.Buffer{}
	md := psql.Metadata{}

	for i := 0; i < len(sg.conf.Roles); i++ {
		role := &sg.conf.Roles[i]

		// skip anon as it's not included in the combined multi-statement
		if role.Name == "anon" {
			continue
		}

		qc, err := sg.qc.Compile(query, role.Name)
		if err != nil {
			return nil, err
		}

		stmts = append(stmts, stmt{role: role, qc: qc})
		s := &stmts[len(stmts)-1]

		md, err = sg.pc.CompileWithMetadata(w, qc, psql.Variables(vm), md)
		if err != nil {
			return nil, err
		}

		s.sql = w.String()
		s.md = md

		w.Reset()
	}

	sql, err := sg.renderUserQuery(md, stmts)
	if err != nil {
		return nil, err
	}

	stmts[0].sql = sql
	return stmts, nil
}

//nolint: errcheck
func (sg *SuperGraph) renderUserQuery(md psql.Metadata, stmts []stmt) (string, error) {
	w := &bytes.Buffer{}

	io.WriteString(w, `SELECT "_sg_auth_info"."role", (CASE "_sg_auth_info"."role" `)

	for _, s := range stmts {
		if s.role.Match == "" &&
			s.role.Name != "user" && s.role.Name != "anon" {
			continue
		}
		io.WriteString(w, `WHEN '`)
		io.WriteString(w, s.role.Name)
		io.WriteString(w, `' THEN (`)
		io.WriteString(w, s.sql)
		io.WriteString(w, `) `)
	}

	io.WriteString(w, `END) FROM (SELECT (CASE WHEN EXISTS (`)
	md.RenderVar(w, sg.conf.RolesQuery)
	io.WriteString(w, `) THEN `)

	io.WriteString(w, `(SELECT (CASE`)
	for _, s := range stmts {
		if s.role.Match == "" {
			continue
		}
		io.WriteString(w, ` WHEN `)
		io.WriteString(w, s.role.Match)
		io.WriteString(w, ` THEN '`)
		io.WriteString(w, s.role.Name)
		io.WriteString(w, `'`)
	}

	io.WriteString(w, ` ELSE 'user' END) FROM (`)
	md.RenderVar(w, sg.conf.RolesQuery)
	io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
	io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler") AS "_sg_auth_info"(role) LIMIT 1; `)

	return w.String(), nil
}

// func (sg *SuperGraph) hasTablesWithConfig(qc *qcode.QCode, role *Role) bool {
// 	for _, id := range qc.Roots {
// 		t, err := sg.schema.GetTable(qc.Selects[id].Name)
// 		if err != nil {
// 			return false
// 		}

// 		if r := role.GetTable(t.Name); r == nil {
// 			return false
// 		}
// 	}
// 	return true
// }
256
core/config.go
@@ -1,256 +0,0 @@
package core

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"

	"github.com/spf13/viper"
)

// Config struct contains core-specific config values
type Config struct {
	// SecretKey is used to encrypt opaque values such as
	// the cursor. Auto-generated if not set
	SecretKey string `mapstructure:"secret_key"`

	// UseAllowList (aka production mode) when set to true ensures
	// only queries listed in the allow.list file can be used. All
	// queries are pre-prepared so no compiling happens and things are
	// very fast.
	UseAllowList bool `mapstructure:"use_allow_list"`

	// AllowListFile is the path to the allow list file; if not set the
	// path is assumed to be the same as the config path (allow.list)
	AllowListFile string `mapstructure:"allow_list_file"`

	// SetUserID forces the database session variable `user.id` to
	// be set to the user id. This variable can be used by triggers
	// or other database functions
	SetUserID bool `mapstructure:"set_user_id"`

	// DefaultBlock ensures that in anonymous mode (role 'anon') all tables
	// are blocked from queries and mutations. To open access to tables in
	// anonymous mode they have to be added to the 'anon' role config.
	DefaultBlock bool `mapstructure:"default_block"`

	// Vars is a map of hardcoded variables that can be leveraged in your
	// queries (eg variable admin_id will be $admin_id in the query)
	Vars map[string]string `mapstructure:"variables"`

	// Blocklist is a list of tables and columns that should be filtered
	// out from any and all queries
	Blocklist []string

	// Tables contains all table specific configuration such as aliased tables,
	// relationships between tables, etc
	Tables []Table

	// RolesQuery if set enables attribute-based access control. This query
	// is used to fetch the user attributes that then dynamically define the
	// user's role.
	RolesQuery string `mapstructure:"roles_query"`

	// Roles contains all the configuration for all the roles you want to support.
	// `user` and `anon` are two default roles. The user role is for when a user ID
	// is available and anon for when it's not.
	//
	// If you're using the RolesQuery config to enable attribute-based access control
	// then you can add more custom roles.
	Roles []Role

	// Inflections is to add additional singular-to-plural mappings
	// to the engine (eg. sheep: sheep)
	Inflections map[string]string `mapstructure:"inflections"`

	// Database schema name. Defaults to 'public'
	DBSchema string `mapstructure:"db_schema"`
}

// Table struct defines a database table
type Table struct {
	Name      string
	Table     string
	Type      string
	Blocklist []string
	Remotes   []Remote
	Columns   []Column
}

// Column struct defines a database column
type Column struct {
	Name       string
	Type       string
	ForeignKey string `mapstructure:"related_to"`
}

// Remote struct defines a remote API endpoint
type Remote struct {
	Name        string
	ID          string
	Path        string
	URL         string
	Debug       bool
	PassHeaders []string `mapstructure:"pass_headers"`
	SetHeaders  []struct {
		Name  string
		Value string
	} `mapstructure:"set_headers"`
}

// Role struct contains role specific access control values for all database tables
type Role struct {
	Name   string
	Match  string
	Tables []RoleTable
	tm     map[string]*RoleTable
}

// RoleTable struct contains role specific access control values for a database table
type RoleTable struct {
	Name     string
	ReadOnly bool `mapstructure:"read_only"`

	Query  *Query
	Insert *Insert
	Update *Update
	Delete *Delete
}

// Query struct contains access control values for query operations
type Query struct {
	Limit            int
	Filters          []string
	Columns          []string
	DisableFunctions bool `mapstructure:"disable_functions"`
	Block            bool
}

// Insert struct contains access control values for insert operations
type Insert struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   bool
}

// Update struct contains access control values for update operations
type Update struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   bool
}

// Delete struct contains access control values for delete operations
type Delete struct {
	Filters []string
	Columns []string
	Block   bool
}

// AddRoleTable function is a helper function to make it easy to add per-table
// row-level config
func (c *Config) AddRoleTable(role, table string, conf interface{}) error {
	var r *Role

	for i := range c.Roles {
		if strings.EqualFold(c.Roles[i].Name, role) {
			r = &c.Roles[i]
			break
		}
	}
	if r == nil {
		nr := Role{Name: role}
		c.Roles = append(c.Roles, nr)
		r = &nr
	}

	var t *RoleTable
	for i := range r.Tables {
		if strings.EqualFold(r.Tables[i].Name, table) {
			t = &r.Tables[i]
			break
		}
	}
	if t == nil {
		nt := RoleTable{Name: table}
		r.Tables = append(r.Tables, nt)
		t = &nt
	}

	switch v := conf.(type) {
	case Query:
		t.Query = &v
	case Insert:
		t.Insert = &v
	case Update:
		t.Update = &v
	case Delete:
		t.Delete = &v
	default:
		return fmt.Errorf("unsupported object type: %t", v)
	}
	return nil
}

// ReadInConfig function reads in the config file for the environment specified in the GO_ENV
// environment variable. This is the best way to create a new Super Graph config.
func ReadInConfig(configFile string) (*Config, error) {
	cp := path.Dir(configFile)
	vi := newViper(cp, path.Base(configFile))

	if err := vi.ReadInConfig(); err != nil {
		return nil, err
	}

	if pcf := vi.GetString("inherits"); pcf != "" {
		cf := vi.ConfigFileUsed()
		vi = newViper(cp, pcf)

		if err := vi.ReadInConfig(); err != nil {
			return nil, err
		}

		if v := vi.GetString("inherits"); v != "" {
			return nil, fmt.Errorf("inherited config (%s) cannot itself inherit (%s)", pcf, v)
		}

		vi.SetConfigFile(cf)

		if err := vi.MergeInConfig(); err != nil {
			return nil, err
		}
	}

	c := &Config{}

	if err := vi.Unmarshal(&c); err != nil {
		return nil, fmt.Errorf("failed to decode config, %v", err)
	}

	if c.AllowListFile == "" {
		c.AllowListFile = path.Join(cp, "allow.list")
	}

	return c, nil
}

func newViper(configPath, configFile string) *viper.Viper {
	vi := viper.New()

	vi.SetEnvPrefix("SG")
	vi.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	vi.AutomaticEnv()

	if filepath.Ext(configFile) != "" {
		vi.SetConfigFile(path.Join(configPath, configFile))
	} else {
		vi.SetConfigName(configFile)
		vi.AddConfigPath(configPath)
		vi.AddConfigPath("./config")
	}

	return vi
}
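AddRoleTable above is the programmatic counterpart to the `roles:` section of dev.yml earlier in this diff. A sketch of building the same "a user only sees their own products" rule in code; the table name and filter mirror that config, and everything else follows the signatures above:

package main

import (
	"log"

	"github.com/dosco/super-graph/core"
)

func main() {
	c := &core.Config{RolesQuery: "SELECT * FROM users WHERE id = $user_id"}

	// Equivalent of the products entry under the `user` role in dev.yml.
	err := c.AddRoleTable("user", "products", core.Query{
		Limit:   50,
		Filters: []string{`{ user_id: { eq: $user_id } }`},
	})
	if err != nil {
		log.Fatal(err)
	}
}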
@@ -1,14 +0,0 @@
package core

import (
	"context"
	"errors"
)

var (
	errNotFound = errors.New("not found in prepared statements")
)

func keyExists(ct context.Context, key contextkey) bool {
	return ct.Value(key) != nil
}
446
core/core.go
@@ -1,446 +0,0 @@
package core

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"hash/maphash"
	"time"

	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/core/internal/qcode"
)

type OpType int

const (
	OpQuery OpType = iota
	OpMutation
)

type extensions struct {
	Tracing *trace `json:"tracing,omitempty"`
}

type trace struct {
	Version   int           `json:"version"`
	StartTime time.Time     `json:"startTime"`
	EndTime   time.Time     `json:"endTime"`
	Duration  time.Duration `json:"duration"`
	Execution execution     `json:"execution"`
}

type execution struct {
	Resolvers []resolver `json:"resolvers"`
}

type resolver struct {
	Path        []string      `json:"path"`
	ParentType  string        `json:"parentType"`
	FieldName   string        `json:"fieldName"`
	ReturnType  string        `json:"returnType"`
	StartOffset int           `json:"startOffset"`
	Duration    time.Duration `json:"duration"`
}

type scontext struct {
	context.Context

	sg    *SuperGraph
	query string
	vars  json.RawMessage
	role  string
	res   Result
}

func (sg *SuperGraph) initCompilers() error {
	var err error
	var schema string

	if sg.conf.DBSchema == "" {
		schema = "public"
	} else {
		schema = sg.conf.DBSchema
	}

	// If sg.dbinfo is not nil then it's probably set
	// for tests
	if sg.dbinfo == nil {
		sg.dbinfo, err = psql.GetDBInfo(sg.db, schema)
		if err != nil {
			return err
		}
	}

	if len(sg.dbinfo.Tables) == 0 {
		return fmt.Errorf("no tables found in database (schema: %s)", schema)
	}

	if err = addTables(sg.conf, sg.dbinfo); err != nil {
		return err
	}

	if err = addForeignKeys(sg.conf, sg.dbinfo); err != nil {
		return err
	}

	sg.schema, err = psql.NewDBSchema(sg.dbinfo, getDBTableAliases(sg.conf))
	if err != nil {
		return err
	}

	sg.qc, err = qcode.NewCompiler(qcode.Config{
		DefaultBlock: sg.conf.DefaultBlock,
		Blocklist:    sg.conf.Blocklist,
	})
	if err != nil {
		return err
	}

	if err := addRoles(sg.conf, sg.qc); err != nil {
		return err
	}

	sg.pc = psql.NewCompiler(psql.Config{
		Schema: sg.schema,
		Vars:   sg.conf.Vars,
	})

	return nil
}

func (c *scontext) execQuery() ([]byte, error) {
	var data []byte
	var st *stmt
	var err error

	if c.sg.conf.UseAllowList {
		data, st, err = c.resolvePreparedSQL()
	} else {
		data, st, err = c.resolveSQL()
	}

	if err != nil {
		return nil, err
	}

	if len(data) == 0 || st.md.Skipped() == 0 {
		return data, nil
	}

	// return c.sg.execRemoteJoin(st, data, c.req.hdr)
	return c.sg.execRemoteJoin(st, data, nil)
}

func (c *scontext) resolvePreparedSQL() ([]byte, *stmt, error) {
	var tx *sql.Tx
	var err error

	mutation := (c.res.op == qcode.QTMutation)
	useRoleQuery := c.sg.abacEnabled && mutation
	useTx := useRoleQuery || c.sg.conf.SetUserID

	if useTx {
		if tx, err = c.sg.db.BeginTx(c, nil); err != nil {
			return nil, nil, err
		}
		defer tx.Rollback() //nolint: errcheck
	}

	if c.sg.conf.SetUserID {
		if err := setLocalUserID(c, tx); err != nil {
			return nil, nil, err
		}
	}

	var role string

	if useRoleQuery {
		if role, err = c.executeRoleQuery(tx); err != nil {
			return nil, nil, err
		}

	} else if v := c.Value(UserRoleKey); v != nil {
		role = v.(string)

	} else {
		role = c.role
	}

	c.res.role = role

	h := maphash.Hash{}
	h.SetSeed(c.sg.hashSeed)
	id := queryID(&h, c.res.name, role)

	q, ok := c.sg.queries[id]
	if !ok {
		return nil, nil, errNotFound
	}

	if q.sd == nil {
		q.Do(func() { c.sg.prepare(q, role) })

		if q.err != nil {
			return nil, nil, q.err
		}
	}

	c.res.sql = q.st.sql

	var root []byte
	var row *sql.Row

	varsList, err := c.argList(q.st.md)
	if err != nil {
		return nil, nil, err
	}

	if useTx {
		row = tx.Stmt(q.sd).QueryRow(varsList...)
	} else {
		row = q.sd.QueryRow(varsList...)
	}

	if q.roleArg {
		err = row.Scan(&role, &root)
	} else {
		err = row.Scan(&root)
	}

	if err != nil {
		return nil, nil, err
	}

	c.role = role

	if useTx {
		if err := tx.Commit(); err != nil {
			return nil, nil, err
		}
	}

	if root, err = c.sg.encryptCursor(q.st.qc, root); err != nil {
		return nil, nil, err
	}

	return root, &q.st, nil
}

func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
	var tx *sql.Tx
	var err error

	mutation := (c.res.op == qcode.QTMutation)
	useRoleQuery := c.sg.abacEnabled && mutation
	useTx := useRoleQuery || c.sg.conf.SetUserID

	if useTx {
		if tx, err = c.sg.db.BeginTx(c, nil); err != nil {
			return nil, nil, err
		}
		defer tx.Rollback() //nolint: errcheck
	}

	if c.sg.conf.SetUserID {
		if err := setLocalUserID(c, tx); err != nil {
			return nil, nil, err
		}
	}

	if useRoleQuery {
		if c.role, err = c.executeRoleQuery(tx); err != nil {
			return nil, nil, err
		}

	} else if v := c.Value(UserRoleKey); v != nil {
		c.role = v.(string)
	}

	stmts, err := c.sg.buildStmt(c.res.op, []byte(c.query), c.vars, c.role)
	if err != nil {
		return nil, nil, err
	}
	st := &stmts[0]
	c.res.sql = st.sql

	varList, err := c.argList(st.md)
	if err != nil {
		return nil, nil, err
	}

	// _, err = t.ExecuteFunc(buf, c.argMap(st.md))
	// if err != nil {
	// 	return nil, nil, err
	// }
	// finalSQL := buf.String()

	// var stime time.Time

	// if c.sg.conf.EnableTracing {
	// 	stime = time.Now()
	// }

	var root []byte
	var role string
	var row *sql.Row

	// defaultRole := c.role

	if useTx {
		row = tx.QueryRowContext(c, st.sql, varList...)
	} else {
		row = c.sg.db.QueryRowContext(c, st.sql, varList...)
	}

	if len(stmts) > 1 {
		err = row.Scan(&role, &root)
	} else {
		err = row.Scan(&root)
	}

	if role == "" {
		c.res.role = c.role
	} else {
		c.res.role = role
	}

	if err != nil {
		return nil, nil, err
	}

	if useTx {
		if err := tx.Commit(); err != nil {
			return nil, nil, err
		}
	}

	if root, err = c.sg.encryptCursor(st.qc, root); err != nil {
		return nil, nil, err
	}

	if c.sg.allowList.IsPersist() {
		if err := c.sg.allowList.Set(c.vars, c.query, ""); err != nil {
			return nil, nil, err
		}
	}

	if len(stmts) > 1 {
		if st = findStmt(role, stmts); st == nil {
			return nil, nil, fmt.Errorf("invalid role '%s' returned", role)
		}
	}

	// if c.sg.conf.EnableTracing {
	// 	for _, id := range st.qc.Roots {
	// 		c.addTrace(st.qc.Selects, id, stime)
	// 	}
	// }

	return root, st, nil
}

func (c *scontext) executeRoleQuery(tx *sql.Tx) (string, error) {
	userID := c.Value(UserIDKey)

	if userID == nil {
		return "anon", nil
	}

	var role string
	row := c.sg.getRole.QueryRow(userID, c.role)

	if err := row.Scan(&role); err != nil {
		return "", err
	}

	return role, nil
}

func (r *Result) Operation() OpType {
	switch r.op {
	case qcode.QTQuery:
		return OpQuery

	case qcode.QTMutation, qcode.QTInsert, qcode.QTUpdate, qcode.QTUpsert, qcode.QTDelete:
		return OpMutation

	default:
		return -1
	}
}

func (r *Result) OperationName() string {
	return r.op.String()
}

func (r *Result) QueryName() string {
	return r.name
}

func (r *Result) Role() string {
	return r.role
}

func (r *Result) SQL() string {
	return r.sql
}

// func (c *scontext) addTrace(sel []qcode.Select, id int32, st time.Time) {
// 	et := time.Now()
// 	du := et.Sub(st)

// 	if c.res.Extensions == nil {
// 		c.res.Extensions = &extensions{&trace{
// 			Version:   1,
// 			StartTime: st,
// 			Execution: execution{},
// 		}}
// 	}

// 	c.res.Extensions.Tracing.EndTime = et
// 	c.res.Extensions.Tracing.Duration = du

// 	n := 1
// 	for i := id; i != -1; i = sel[i].ParentID {
// 		n++
// 	}
// 	path := make([]string, n)

// 	n--
// 	for i := id; ; i = sel[i].ParentID {
// 		path[n] = sel[i].Name
// 		if sel[i].ParentID == -1 {
// 			break
// 		}
// 		n--
// 	}

// 	tr := resolver{
// 		Path:        path,
// 		ParentType:  "Query",
// 		FieldName:   sel[id].Name,
// 		ReturnType:  "object",
// 		StartOffset: 1,
// 		Duration:    du,
// 	}

// 	c.res.Extensions.Tracing.Execution.Resolvers =
// 		append(c.res.Extensions.Tracing.Execution.Resolvers, tr)
// }

func findStmt(role string, stmts []stmt) *stmt {
	for i := range stmts {
		if stmts[i].role.Name != role {
			continue
		}
		return &stmts[i]
	}
	return nil
}
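The Result accessors above (Operation, OperationName, QueryName, Role, SQL) expose execution metadata without making the underlying fields mutable. A sketch of logging them after a call; `inspectResult` is hypothetical and assumes `sg` was built as in the core/api.go example earlier in this diff:

package app

import (
	"context"
	"log"

	"github.com/dosco/super-graph/core"
)

// inspectResult runs a query and logs the metadata captured during
// execution; SQL() is handy for debugging what the compiler generated
// for the resolved role.
func inspectResult(ctx context.Context, sg *core.SuperGraph, query string) {
	res, err := sg.GraphQL(ctx, query, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("name=%s role=%s", res.QueryName(), res.Role())
	log.Printf("generated sql=%s", res.SQL())
}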
@@ -1,72 +0,0 @@
package core

import (
	"bytes"
	"encoding/base64"

	"github.com/dosco/super-graph/core/internal/crypto"
	"github.com/dosco/super-graph/core/internal/qcode"
	"github.com/dosco/super-graph/jsn"
)

func (sg *SuperGraph) encryptCursor(qc *qcode.QCode, data []byte) ([]byte, error) {
	var keys [][]byte

	for _, s := range qc.Selects {
		if s.Paging.Type != qcode.PtOffset {
			var buf bytes.Buffer

			buf.WriteString(s.FieldName)
			buf.WriteString("_cursor")
			keys = append(keys, buf.Bytes())
		}
	}

	if len(keys) == 0 {
		return data, nil
	}

	from := jsn.Get(data, keys)
	to := make([]jsn.Field, len(from))

	for i, f := range from {
		to[i].Key = f.Key

		if f.Value[0] != '"' || f.Value[len(f.Value)-1] != '"' {
			continue
		}

		var buf bytes.Buffer

		if len(f.Value) > 2 {
			v, err := crypto.Encrypt(f.Value[1:len(f.Value)-1], &sg.encKey)
			if err != nil {
				return nil, err
			}

			buf.WriteByte('"')
			buf.WriteString(base64.StdEncoding.EncodeToString(v))
			buf.WriteByte('"')
		} else {
			buf.WriteString(`null`)
		}

		to[i].Value = buf.Bytes()
	}

	var buf bytes.Buffer

	if err := jsn.Replace(&buf, data, from, to); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

func (sg *SuperGraph) decrypt(data string) ([]byte, error) {
	v, err := base64.StdEncoding.DecodeString(data)
	if err != nil {
		return nil, err
	}
	return crypto.Decrypt(v, &sg.encKey)
}
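encryptCursor above replaces every `*_cursor` value in the response with an opaque, base64-encoded ciphertext, and the `cursor` case in core/args.go decrypts it when the client echoes it back. A sketch of that round trip from the caller's side; `nextPage` is hypothetical, but the `cursor` variable name is the one argList looks for:

package app

import (
	"context"
	"encoding/json"

	"github.com/dosco/super-graph/core"
)

// nextPage fetches the next page of a paged query. The cursor value is
// opaque: encryptCursor sealed it on the way out and argList unseals it
// on the way back in, so the caller just echoes it verbatim.
func nextPage(ctx context.Context, sg *core.SuperGraph, query, cursor string) (*core.Result, error) {
	vars, err := json.Marshal(map[string]string{"cursor": cursor})
	if err != nil {
		return nil, err
	}
	return sg.GraphQL(ctx, query, vars)
}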
15
core/db.go
@@ -1,15 +0,0 @@
package core

import (
	"context"
	"database/sql"
)

func setLocalUserID(c context.Context, tx *sql.Tx) error {
	var err error
	if v := c.Value(UserIDKey); v != nil {
		_, err = tx.Exec(`SET LOCAL "user.id" = ?`, v)
	}

	return err
}
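setLocalUserID is what backs the `set_user_id` config option: when enabled, execution runs inside a transaction (see `useTx` in core/core.go above) and the user id is set as a session variable before the query runs. A sketch of turning it on; the connection string is an assumption, and the trigger-side lookup in the comment is standard Postgres rather than anything this package provides:

package main

import (
	"database/sql"
	"log"

	"github.com/dosco/super-graph/core"
	_ "github.com/jackc/pgx/v4/stdlib"
)

func main() {
	db, err := sql.Open("pgx", "postgres://postgres:@localhost:5432/app_development")
	if err != nil {
		log.Fatal(err)
	}

	// SetUserID makes every request run in a transaction that first
	// executes SET LOCAL "user.id", so triggers and database functions
	// can read it with: current_setting('user.id')
	c := &core.Config{SetUserID: true}

	if _, err := core.NewSuperGraph(c, db); err != nil {
		log.Fatal(err)
	}
}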
341
core/init.go
@ -1,341 +0,0 @@
package core

import (
	"fmt"
	"strings"

	"github.com/dosco/super-graph/core/internal/psql"
	"github.com/dosco/super-graph/core/internal/qcode"
	"github.com/gobuffalo/flect"
)

func (sg *SuperGraph) initConfig() error {
	c := sg.conf

	for k, v := range c.Inflections {
		flect.AddPlural(k, v)
	}

	// Tables: Validate and sanitize
	tm := make(map[string]struct{})

	for i := 0; i < len(c.Tables); i++ {
		t := &c.Tables[i]
		// t.Name = flect.Pluralize(strings.ToLower(t.Name))

		if _, ok := tm[t.Name]; ok {
			sg.conf.Tables = append(c.Tables[:i], c.Tables[i+1:]...)
			sg.log.Printf("WRN duplicate table found: %s", t.Name)
		}
		tm[t.Name] = struct{}{}

		t.Table = flect.Pluralize(strings.ToLower(t.Table))
	}

	sg.roles = make(map[string]*Role)

	for i := 0; i < len(c.Roles); i++ {
		role := &c.Roles[i]
		role.Name = sanitize(role.Name)

		if _, ok := sg.roles[role.Name]; ok {
			c.Roles = append(c.Roles[:i], c.Roles[i+1:]...)
			sg.log.Printf("WRN duplicate role found: %s", role.Name)
		}

		role.Match = sanitize(role.Match)
		role.tm = make(map[string]*RoleTable)

		for n, table := range role.Tables {
			role.tm[table.Name] = &role.Tables[n]
		}

		sg.roles[role.Name] = role
	}

	// If user role not defined then create it
	if _, ok := sg.roles["user"]; !ok {
		ur := Role{
			Name: "user",
			tm:   make(map[string]*RoleTable),
		}
		c.Roles = append(c.Roles, ur)
		sg.roles["user"] = &ur
	}

	// If anon role is not defined then create it
	if _, ok := sg.roles["anon"]; !ok {
		ur := Role{
			Name: "anon",
			tm:   make(map[string]*RoleTable),
		}
		c.Roles = append(c.Roles, ur)
		sg.roles["anon"] = &ur
	}

	if c.RolesQuery == "" {
		sg.log.Printf("INF roles_query not defined: attribute based access control disabled")
	} else {
		n := 0
		for k, v := range sg.roles {
			if k == "user" || k == "anon" {
				n++
			} else if v.Match != "" {
				n++
			}
		}
		sg.abacEnabled = (n > 2)

		if !sg.abacEnabled {
			sg.log.Printf("WRN attribute based access control disabled: no custom roles found (with 'match' defined)")
		}
	}

	return nil
}

func getDBTableAliases(c *Config) map[string][]string {
	m := make(map[string][]string, len(c.Tables))

	for i := range c.Tables {
		t := c.Tables[i]

		if t.Table != "" && t.Type == "" {
			m[t.Table] = append(m[t.Table], t.Name)
		}
	}
	return m
}

func addTables(c *Config, di *psql.DBInfo) error {
	var err error

	for _, t := range c.Tables {
		switch t.Type {
		case "json", "jsonb":
			err = addJsonTable(di, t.Columns, t)

		case "polymorphic":
			err = addVirtualTable(di, t.Columns, t)
		}

		if err != nil {
			return err
		}
	}
	return nil
}

func addJsonTable(di *psql.DBInfo, cols []Column, t Table) error {
	// This is for jsonb columns that want to be tables.
	bc, ok := di.GetColumn(t.Table, t.Name)
	if !ok {
		return fmt.Errorf(
			"json table: column '%s' not found on table '%s'",
			t.Name, t.Table)
	}

	if bc.Type != "json" && bc.Type != "jsonb" {
		return fmt.Errorf(
			"json table: column '%s' in table '%s' is of type '%s'. Only JSON or JSONB is valid",
			t.Name, t.Table, bc.Type)
	}

	table := psql.DBTable{
		Name: t.Name,
		Key:  strings.ToLower(t.Name),
		Type: bc.Type,
	}

	columns := make([]psql.DBColumn, 0, len(cols))

	for i := range cols {
		c := cols[i]
		columns = append(columns, psql.DBColumn{
			Name: c.Name,
			Key:  strings.ToLower(c.Name),
			Type: c.Type,
		})
	}

	di.AddTable(table, columns)
	bc.FKeyTable = t.Name

	return nil
}

func addVirtualTable(di *psql.DBInfo, cols []Column, t Table) error {
	if len(cols) == 0 {
		return fmt.Errorf("polymorphic table: no id column specified")
	}

	c := cols[0]

	if c.ForeignKey == "" {
		return fmt.Errorf("polymorphic table: no 'related_to' specified on id column")
	}

	s := strings.SplitN(c.ForeignKey, ".", 2)

	if len(s) != 2 {
		return fmt.Errorf("polymorphic table: foreign key must be <type column>.<foreign key column>")
	}

	di.VTables = append(di.VTables, psql.VirtualTable{
		Name:       t.Name,
		IDColumn:   c.Name,
		TypeColumn: s[0],
		FKeyColumn: s[1],
	})

	return nil
}

func addForeignKeys(c *Config, di *psql.DBInfo) error {
	for _, t := range c.Tables {
		if t.Type != "" {
			continue
		}
		for _, c := range t.Columns {
			if c.ForeignKey == "" {
				continue
			}
			if err := addForeignKey(di, c, t); err != nil {
				return err
			}
		}
	}
	return nil
}

func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
	var tn string

	if t.Type == "polymorphic" {
		tn = t.Table
	} else {
		tn = t.Name
	}

	c1, ok := di.GetColumn(tn, c.Name)
	if !ok {
		return fmt.Errorf(
			"config: invalid table '%s' or column '%s' defined",
			tn, c.Name)
	}

	v := strings.SplitN(c.ForeignKey, ".", 2)
	if len(v) != 2 {
		return fmt.Errorf(
			"config: invalid foreign_key defined for table '%s' and column '%s': %s",
			tn, c.Name, c.ForeignKey)
	}

	// check if it's a polymorphic foreign key
	if _, ok := di.GetColumn(tn, v[0]); ok {
		c2, ok := di.GetColumn(tn, v[1])
		if !ok {
			return fmt.Errorf(
				"config: invalid column '%s' for polymorphic relationship on table '%s' and column '%s'",
				v[1], tn, c.Name)
		}

		c1.FKeyTable = v[0]
		c1.FKeyColID = []int16{c2.ID}
		return nil
	}

	fkt, fkc := v[0], v[1]
	c3, ok := di.GetColumn(fkt, fkc)
	if !ok {
		return fmt.Errorf(
			"config: foreign_key for table '%s' and column '%s' points to unknown table '%s' and column '%s'",
			t.Name, c.Name, v[0], v[1])
	}

	c1.FKeyTable = fkt
	c1.FKeyColID = []int16{c3.ID}

	return nil
}

func addRoles(c *Config, qc *qcode.Compiler) error {
	for _, r := range c.Roles {
		for _, t := range r.Tables {
			if err := addRole(qc, r, t, c.DefaultBlock); err != nil {
				return err
			}
		}
	}

	return nil
}

func addRole(qc *qcode.Compiler, r Role, t RoleTable, defaultBlock bool) error {
	ro := false // read-only

	if defaultBlock && r.Name == "anon" {
		ro = true
	}

	if t.ReadOnly {
		ro = true
	}

	query := qcode.QueryConfig{Block: false}
	insert := qcode.InsertConfig{Block: ro}
	update := qcode.UpdateConfig{Block: ro}
	del := qcode.DeleteConfig{Block: ro}

	if t.Query != nil {
		query = qcode.QueryConfig{
			Limit:            t.Query.Limit,
			Filters:          t.Query.Filters,
			Columns:          t.Query.Columns,
			DisableFunctions: t.Query.DisableFunctions,
			Block:            t.Query.Block,
		}
	}

	if t.Insert != nil {
		insert = qcode.InsertConfig{
			Filters: t.Insert.Filters,
			Columns: t.Insert.Columns,
			Presets: t.Insert.Presets,
			Block:   t.Insert.Block,
		}
	}

	if t.Update != nil {
		update = qcode.UpdateConfig{
			Filters: t.Update.Filters,
			Columns: t.Update.Columns,
			Presets: t.Update.Presets,
			Block:   t.Update.Block,
		}
	}

	if t.Delete != nil {
		del = qcode.DeleteConfig{
			Filters: t.Delete.Filters,
			Columns: t.Delete.Columns,
			Block:   t.Delete.Block,
		}
	}

	return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
		Query:  query,
		Insert: insert,
		Update: update,
		Delete: del,
	})
}

func (r *Role) GetTable(name string) *RoleTable {
	return r.tm[name]
}

func sanitize(value string) string {
	return strings.ToLower(strings.TrimSpace(value))
}
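A minimal sketch of the configuration these helpers consume; the field names (Name, Table, Type, Columns, ForeignKey) come from this file, while the table and column values are hypothetical.

package main

import "github.com/dosco/super-graph/core"

func main() {
	conf := core.Config{
		Tables: []core.Table{
			{
				// A jsonb column ("metadata" on table "products") exposed as
				// its own queryable table via addJsonTable; names illustrative.
				Name:  "metadata",
				Table: "products",
				Type:  "jsonb",
				Columns: []core.Column{
					{Name: "color", Type: "text"},
				},
			},
			{
				// A polymorphic (virtual) table handled by addVirtualTable;
				// ForeignKey follows the "<type column>.<foreign key column>" form.
				Name:  "subject",
				Table: "notifications",
				Type:  "polymorphic",
				Columns: []core.Column{
					{Name: "subject_id", ForeignKey: "subject_type.id"},
				},
			},
		},
	}
	_ = conf
}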
@ -1,349 +0,0 @@
package allow

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"sort"
	"strings"
	"text/scanner"

	"github.com/chirino/graphql/schema"
	"github.com/dosco/super-graph/jsn"
)

const (
	expComment = iota + 1
	expVar
	expQuery
)

type Item struct {
	Name    string
	key     string
	Query   string
	Vars    string
	Comment string
}

type List struct {
	filepath string
	saveChan chan Item
}

type Config struct {
	CreateIfNotExists bool
	Persist           bool
	Log               *log.Logger
}

func New(filename string, conf Config) (*List, error) {
	al := List{}

	if filename != "" {
		fp := filename

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if al.filepath == "" {
		fp := "./allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if al.filepath == "" {
		fp := "./config/allow.list"

		if _, err := os.Stat(fp); err == nil {
			al.filepath = fp
		} else if !os.IsNotExist(err) {
			return nil, err
		}
	}

	if al.filepath == "" {
		if !conf.CreateIfNotExists {
			return nil, errors.New("allow.list not found")
		}

		if filename == "" {
			al.filepath = "./config/allow.list"
		} else {
			al.filepath = filename
		}

		if file, err := os.OpenFile(al.filepath, os.O_RDONLY|os.O_CREATE, 0644); err != nil {
			return nil, err
		} else {
			file.Close()
		}
	}

	if conf.Persist {
		al.saveChan = make(chan Item)

		go func() {
			for v := range al.saveChan {
				err := al.save(v)

				if err != nil && conf.Log != nil {
					conf.Log.Println("WRN allow list save:", err)
				}
			}
		}()
	}

	return &al, nil
}

func (al *List) IsPersist() bool {
	return al.saveChan != nil
}

func (al *List) Set(vars []byte, query, comment string) error {
	if al.saveChan == nil {
		return errors.New("allow.list is read-only")
	}

	if query == "" {
		return errors.New("empty query")
	}

	al.saveChan <- Item{
		Comment: comment,
		Query:   query,
		Vars:    string(vars),
	}

	return nil
}

func (al *List) Load() ([]Item, error) {
	b, err := ioutil.ReadFile(al.filepath)
	if err != nil {
		return nil, err
	}

	return parse(string(b), al.filepath)
}

func parse(b, filename string) ([]Item, error) {
	var items []Item

	var s scanner.Scanner
	s.Init(strings.NewReader(b))
	s.Filename = filename
	s.Mode ^= scanner.SkipComments

	var op, sp scanner.Position
	var item Item

	newComment := false
	st := expComment

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		txt := s.TokenText()

		switch {
		case strings.HasPrefix(txt, "/*"):
			if st == expQuery {
				v := b[sp.Offset:s.Pos().Offset]
				item.Query = strings.TrimSpace(v[:strings.LastIndexByte(v, '}')+1])
				items = append(items, item)
			}
			item = Item{Comment: strings.TrimSpace(txt[2 : len(txt)-2])}
			sp = s.Pos()
			st = expComment
			newComment = true

		case !newComment && strings.HasPrefix(txt, "#"):
			if st == expQuery {
				v := b[sp.Offset:s.Pos().Offset]
				item.Query = strings.TrimSpace(v[:strings.LastIndexByte(v, '}')+1])
				items = append(items, item)
			}
			item = Item{}
			sp = s.Pos()
			st = expComment

		case strings.HasPrefix(txt, "variables"):
			if st == expComment {
				v := b[sp.Offset:s.Pos().Offset]
				item.Comment = strings.TrimSpace(v[:strings.IndexByte(v, '\n')])
			}
			sp = s.Pos()
			st = expVar

		case isGraphQL(txt):
			if st == expVar {
				v := b[sp.Offset:s.Pos().Offset]
				item.Vars = strings.TrimSpace(v[:strings.LastIndexByte(v, '}')+1])
			}
			sp = op
			st = expQuery
		}
		op = s.Pos()
	}

	if st == expQuery {
		v := b[sp.Offset:s.Pos().Offset]
		item.Query = strings.TrimSpace(v[:strings.LastIndexByte(v, '}')+1])
		items = append(items, item)
	}

	for i := range items {
		items[i].Name = QueryName(items[i].Query)
		items[i].key = strings.ToLower(items[i].Name)
	}

	return items, nil
}

func isGraphQL(s string) bool {
	return strings.HasPrefix(s, "query") ||
		strings.HasPrefix(s, "mutation") ||
		strings.HasPrefix(s, "subscription")
}

func (al *List) save(item Item) error {
	var buf bytes.Buffer

	qd := &schema.QueryDocument{}

	if err := qd.Parse(item.Query); err != nil {
		return err
	}

	qd.WriteTo(&buf)
	query := buf.String()
	buf.Reset()

	item.Name = QueryName(query)
	item.key = strings.ToLower(item.Name)

	if item.Name == "" {
		return nil
	}

	list, err := al.Load()
	if err != nil {
		return err
	}

	index := -1

	for i, v := range list {
		if strings.EqualFold(v.Name, item.Name) {
			index = i
			break
		}
	}

	if index != -1 {
		if list[index].Comment != "" {
			item.Comment = list[index].Comment
		}
		list[index] = item
	} else {
		list = append(list, item)
	}

	f, err := os.Create(al.filepath)
	if err != nil {
		return err
	}

	defer f.Close()

	sort.Slice(list, func(i, j int) bool {
		return strings.Compare(list[i].key, list[j].key) == -1
	})

	for i, v := range list {
		var vars string
		if v.Vars != "" {
			buf.Reset()
			if err := jsn.Clear(&buf, []byte(v.Vars)); err != nil {
				continue
			}
			vj := json.RawMessage(buf.Bytes())

			if vj, err = json.MarshalIndent(vj, "", " "); err != nil {
				continue
			}
			vars = string(vj)
		}
		list[i].Vars = vars
		list[i].Comment = strings.TrimSpace(v.Comment)
	}

	for _, v := range list {
		if v.Comment != "" {
			_, err = f.WriteString(fmt.Sprintf("/* %s */\n\n", v.Comment))
		} else {
			_, err = f.WriteString(fmt.Sprintf("/* %s */\n\n", v.Name))
		}

		if err != nil {
			return err
		}

		if v.Vars != "" {
			_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", v.Vars))
			if err != nil {
				return err
			}
		}

		_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
		if err != nil {
			return err
		}
	}

	return nil
}

func QueryName(b string) string {
	state, s := 0, 0

	for i := 0; i < len(b); i++ {
		switch {
		case state == 2 && !isValidNameChar(b[i]):
			return b[s:i]
		case state == 1 && b[i] == '{':
			return ""
		case state == 1 && isValidNameChar(b[i]):
			s = i
			state = 2
		case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
			state = 1
		}
	}

	return ""
}

func isValidNameChar(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_'
}
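A minimal usage sketch of this package built only from the exported API above; the import path, file location and query are assumptions.

package main

import (
	"log"

	"github.com/dosco/super-graph/core/internal/allow" // path assumed
)

func main() {
	al, err := allow.New("./config/allow.list", allow.Config{
		CreateIfNotExists: true,
		Persist:           true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Queued to the background goroutine started in New(); it parses,
	// normalizes and rewrites the allow.list file.
	err = al.Set([]byte(`{"id": 1}`),
		`query getUser { user(id: $id) { id } }`, "fetch one user")
	if err != nil {
		log.Fatal(err)
	}

	items, err := al.Load()
	if err != nil {
		log.Fatal(err)
	}
	_ = items // each Item carries Name, Query, Vars and Comment
}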
@ -1,241 +0,0 @@
package allow

import (
	"testing"
)

func TestGQLName1(t *testing.T) {
	var q = `
	query {
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) { id name } }`

	name := QueryName(q)

	if name != "" {
		t.Fatal("Name should be empty, not ", name)
	}
}

func TestGQLName2(t *testing.T) {
	var q = `
	query hakuna_matata

	{
		products(
			distinct: [price]
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
		) {
			id
			name
		}
	}`

	name := QueryName(q)

	if name != "hakuna_matata" {
		t.Fatal("Name should be 'hakuna_matata', not ", name)
	}
}

func TestGQLName3(t *testing.T) {
	var q = `
	mutation means{ users { id } }`

	// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `

	name := QueryName(q)

	if name != "means" {
		t.Fatal("Name should be 'means', not ", name)
	}
}

func TestGQLName4(t *testing.T) {
	var q = `
	query no_worries
	users {
		id
	}
	}`

	name := QueryName(q)

	if name != "no_worries" {
		t.Fatal("Name should be 'no_worries', not ", name)
	}
}

func TestGQLName5(t *testing.T) {
	var q = `
	{
		users {
			id
		}
	}`

	name := QueryName(q)

	if len(name) != 0 {
		t.Fatal("Name should be empty, not ", name)
	}
}

func TestParse1(t *testing.T) {
	var al = `
	# Hello world

	variables {
		"data": {
			"slug": "",
			"body": "",
			"post": {
				"connect": {
					"slug": ""
				}
			}
		}
	}

	mutation createComment {
		comment(insert: $data) {
			slug
			body
			createdAt: created_at
			totalVotes: cached_votes_total
			totalReplies: cached_replies_total
			vote: comment_vote(where: {user_id: {eq: $user_id}}) {
				created_at
				__typename
			}
			author: user {
				slug
				firstName: first_name
				lastName: last_name
				pictureURL: picture_url
				bio
				__typename
			}
			__typename
		}
	}

	# Query named createPost

	query createPost {
		post(insert: $data) {
			slug
			body
			published
			createdAt: created_at
			totalVotes: cached_votes_total
			totalComments: cached_comments_total
			vote: post_vote(where: {user_id: {eq: $user_id}}) {
				created_at
				__typename
			}
			author: user {
				slug
				firstName: first_name
				lastName: last_name
				pictureURL: picture_url
				bio
				__typename
			}
			__typename
		}
	}`

	_, err := parse(al, "allow.list")
	if err != nil {
		t.Fatal(err)
	}
}

func TestParse2(t *testing.T) {
	var al = `
	/* Hello world */

	variables {
		"data": {
			"slug": "",
			"body": "",
			"post": {
				"connect": {
					"slug": ""
				}
			}
		}
	}

	mutation createComment {
		comment(insert: $data) {
			slug
			body
			createdAt: created_at
			totalVotes: cached_votes_total
			totalReplies: cached_replies_total
			vote: comment_vote(where: {user_id: {eq: $user_id}}) {
				created_at
				__typename
			}
			author: user {
				slug
				firstName: first_name
				lastName: last_name
				pictureURL: picture_url
				bio
				__typename
			}
			__typename
		}
	}

	/*
	Query named createPost
	*/

	variables {
		"data": {
			"thread": {
				"connect": {
					"slug": ""
				}
			},
			"slug": "",
			"published": false,
			"body": ""
		}
	}

	query createPost {
		post(insert: $data) {
			slug
			body
			published
			createdAt: created_at
			totalVotes: cached_votes_total
			totalComments: cached_comments_total
			vote: post_vote(where: {user_id: {eq: $user_id}}) {
				created_at
				__typename
			}
			author: user {
				slug
				firstName: first_name
				lastName: last_name
				pictureURL: picture_url
				bio
				__typename
			}
			__typename
		}
	}`

	_, err := parse(al, "allow.list")
	if err != nil {
		t.Fatal(err)
	}
}
@ -1,15 +0,0 @@
package allow

import "testing"

func TestFuzzCrashers(t *testing.T) {
	var crashers = []string{
		"query",
		"q",
		"que",
	}

	for _, f := range crashers {
		_ = QueryName(f)
	}
}
@ -1,80 +0,0 @@
// cryptopasta - basic cryptography examples
//
// Written in 2015 by George Tankersley <george.tankersley@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software. If not, see
// <http://creativecommons.org/publicdomain/zero/1.0/>.

// Package crypto provides symmetric authenticated encryption using 256-bit
// AES-GCM with a random nonce.
package crypto

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"errors"
	"io"
)

// NewEncryptionKey generates a random 256-bit key for Encrypt() and
// Decrypt(). It panics if the source of randomness fails.
func NewEncryptionKey() [32]byte {
	key := [32]byte{}
	_, err := io.ReadFull(rand.Reader, key[:])
	if err != nil {
		panic(err)
	}
	return key
}

// Encrypt encrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Output takes the
// form nonce|ciphertext|tag where '|' indicates concatenation.
func Encrypt(plaintext []byte, key *[32]byte) (ciphertext []byte, err error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce := make([]byte, gcm.NonceSize())
	_, err = io.ReadFull(rand.Reader, nonce)
	if err != nil {
		return nil, err
	}

	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// Decrypt decrypts data using 256-bit AES-GCM. This both hides the content of
// the data and provides a check that it hasn't been altered. Expects input
// form nonce|ciphertext|tag where '|' indicates concatenation.
func Decrypt(ciphertext []byte, key *[32]byte) (plaintext []byte, err error) {
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	if len(ciphertext) < gcm.NonceSize() {
		return nil, errors.New("malformed ciphertext")
	}

	return gcm.Open(nil,
		ciphertext[:gcm.NonceSize()],
		ciphertext[gcm.NonceSize():],
		nil,
	)
}
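A round-trip sketch using only the three functions this package exports; the import path is assumed from the repo layout.

package main

import (
	"fmt"
	"log"

	"github.com/dosco/super-graph/core/internal/crypto" // path assumed
)

func main() {
	key := crypto.NewEncryptionKey()

	// Ciphertext is nonce|ciphertext|tag, so each call produces different bytes.
	ct, err := crypto.Encrypt([]byte("hello"), &key)
	if err != nil {
		log.Fatal(err)
	}

	pt, err := crypto.Decrypt(ct, &key)
	if err != nil {
		log.Fatal(err) // fails if the ciphertext was truncated or tampered with
	}
	fmt.Printf("%s\n", pt) // hello
}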
@ -1,88 +0,0 @@
package cockroachdb_test

import (
	"database/sql"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"regexp"
	"sync/atomic"
	"testing"

	integration_tests "github.com/dosco/super-graph/core/internal/integration_tests"
	_ "github.com/jackc/pgx/v4/stdlib"
	"github.com/stretchr/testify/require"
)

func TestCockroachDB(t *testing.T) {
	dir, err := ioutil.TempDir("", "temp-cockroachdb-")
	if err != nil {
		log.Fatal(err)
	}

	cmd := exec.Command("cockroach", "start", "--insecure", "--listen-addr", ":0", "--http-addr", ":0", "--store=path="+dir)
	finder := &urlFinder{
		c: make(chan bool),
	}
	cmd.Stdout = finder
	cmd.Stderr = ioutil.Discard

	err = cmd.Start()
	if err != nil {
		t.Skip("is CockroachDB installed?: " + err.Error())
		return
	}
	fmt.Println("started temporary cockroach db")

	stopped := int32(0)
	stopDatabase := func() {
		fmt.Println("stopping temporary cockroach db")
		if atomic.CompareAndSwapInt32(&stopped, 0, 1) {
			if err := cmd.Process.Kill(); err != nil {
				log.Fatal(err)
			}
			if _, err := cmd.Process.Wait(); err != nil {
				log.Fatal(err)
			}
			os.RemoveAll(dir)
		}
	}
	defer stopDatabase()

	// Wait till we figure out the URL we should connect to...
	<-finder.c
	db, err := sql.Open("pgx", finder.URL)
	if err != nil {
		stopDatabase()
		require.NoError(t, err)
	}
	integration_tests.SetupSchema(t, db)

	integration_tests.TestSuperGraph(t, db, func(t *testing.T) {
		if t.Name() == "TestCockroachDB/nested_insert" {
			t.Skip("nested inserts currently not working yet on cockroach db")
		}
	})
}

type urlFinder struct {
	c    chan bool
	done bool
	URL  string
}

func (finder *urlFinder) Write(p []byte) (n int, err error) {
	s := string(p)
	urlRegex := regexp.MustCompile(`\nsql:\s+(postgresql:[^\s]+)\n`)
	if !finder.done {
		submatch := urlRegex.FindAllStringSubmatch(s, -1)
		if submatch != nil {
			finder.URL = submatch[0][1]
			finder.done = true
			close(finder.c)
		}
	}
	return len(p), nil
}
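A quick sketch of the log line urlFinder matches; the sample line mimics the "sql:" URL CockroachDB prints at startup, and the exact address is illustrative.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Sample startup output in the "sql:" format; the address is illustrative.
	out := "\nsql:                postgresql://root@127.0.0.1:26257?sslmode=disable\n"

	re := regexp.MustCompile(`\nsql:\s+(postgresql:[^\s]+)\n`)
	if m := re.FindAllStringSubmatch(out, -1); m != nil {
		fmt.Println(m[0][1]) // postgresql://root@127.0.0.1:26257?sslmode=disable
	}
}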
@ -1,249 +0,0 @@
package integration_tests

import (
	"context"
	"database/sql"
	"encoding/json"
	"io/ioutil"
	"testing"

	"github.com/dosco/super-graph/core"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func SetupSchema(t *testing.T, db *sql.DB) {
	_, err := db.Exec(`
	CREATE TABLE users (
		id integer PRIMARY KEY,
		full_name text
	)`)
	require.NoError(t, err)

	_, err = db.Exec(`CREATE TABLE product (
		id integer PRIMARY KEY,
		name text,
		weight float
	)`)
	require.NoError(t, err)

	_, err = db.Exec(`CREATE TABLE line_item (
		id integer PRIMARY KEY,
		product integer REFERENCES product(id),
		quantity integer,
		price float
	)`)
	require.NoError(t, err)
}

func DropSchema(t *testing.T, db *sql.DB) {
	_, err := db.Exec(`DROP TABLE IF EXISTS line_item`)
	require.NoError(t, err)

	_, err = db.Exec(`DROP TABLE IF EXISTS product`)
	require.NoError(t, err)

	_, err = db.Exec(`DROP TABLE IF EXISTS users`)
	require.NoError(t, err)
}

func TestSuperGraph(t *testing.T, db *sql.DB, before func(t *testing.T)) {
	config := core.Config{}
	config.UseAllowList = false
	config.AllowListFile = "./allow.list"
	config.RolesQuery = `SELECT * FROM users WHERE id = $user_id`

	sg, err := core.NewSuperGraph(&config, db)
	require.NoError(t, err)
	ctx := context.Background()

	t.Run("seed fixtures", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`mutation { products (insert: $products) { id } }`,
			json.RawMessage(`{"products":[
				{"id":1, "name":"Charmin Ultra Soft", "weight": 0.5},
				{"id":2, "name":"Hand Sanitizer", "weight": 0.2},
				{"id":3, "name":"Case of Corona", "weight": 1.2}
			]}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"products": [{"id": 1}, {"id": 2}, {"id": 3}]}`, string(res.Data))

		res, err = sg.GraphQL(ctx,
			`mutation { line_items (insert: $line_items) { id } }`,
			json.RawMessage(`{"line_items":[
				{"id":5001, "product":1, "price":6.95, "quantity":10},
				{"id":5002, "product":2, "price":10.99, "quantity":2}
			]}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_items": [{"id": 5001}, {"id": 5002}]}`, string(res.Data))
	})

	t.Run("get line item", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`query { line_item(id:$id) { id, price, quantity } }`,
			json.RawMessage(`{"id":5001}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_item": {"id": 5001, "price": 6.95, "quantity": 10}}`, string(res.Data))
	})

	t.Run("get line items", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`query { line_items { id, price, quantity } }`,
			json.RawMessage(`{}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_items": [{"id": 5001, "price": 6.95, "quantity": 10}, {"id": 5002, "price": 10.99, "quantity": 2}]}`, string(res.Data))
	})

	t.Run("update line item", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`mutation { line_item(update:$update, id:$id) { id } }`,
			json.RawMessage(`{"id":5001, "update":{"quantity":20}}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_item": {"id": 5001}}`, string(res.Data))

		res, err = sg.GraphQL(ctx,
			`query { line_item(id:$id) { id, price, quantity } }`,
			json.RawMessage(`{"id":5001}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_item": {"id": 5001, "price": 6.95, "quantity": 20}}`, string(res.Data))
	})

	t.Run("delete line item", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`mutation { line_item(delete:true, id:$id) { id } }`,
			json.RawMessage(`{"id":5002}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_item": {"id": 5002}}`, string(res.Data))

		res, err = sg.GraphQL(ctx,
			`query { line_items { id, price, quantity } }`,
			json.RawMessage(`{}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_items": [{"id": 5001, "price": 6.95, "quantity": 20}]}`, string(res.Data))
	})

	t.Run("nested insert", func(t *testing.T) {
		before(t)
		res, err := sg.GraphQL(ctx,
			`mutation { line_items (insert: $line_item) { id, product { name } } }`,
			json.RawMessage(`{"line_item":
				{"id":5003, "product": { "connect": { "id": 1} }, "price":10.95, "quantity":15}
			}`))
		require.NoError(t, err, res.SQL())
		require.Equal(t, `{"line_items": [{"id": 5003, "product": {"name": "Charmin Ultra Soft"}}]}`, string(res.Data))
	})

	t.Run("schema introspection", func(t *testing.T) {
		before(t)
		schema, err := sg.GraphQLSchema()
		require.NoError(t, err)
		// Uncomment the following line if you need to regenerate the expected schema.
		//ioutil.WriteFile("../introspection.graphql", []byte(schema), 0644)
		expected, err := ioutil.ReadFile("../introspection.graphql")
		require.NoError(t, err)
		assert.Equal(t, string(expected), schema)
	})

	res, err := sg.GraphQL(ctx, introspectionQuery, json.RawMessage(``))
	assert.NoError(t, err)
	assert.Contains(t, string(res.Data),
		`{"queryType":{"name":"Query"},"mutationType":{"name":"Mutation"},"subscriptionType":null,"types":`)
}

const introspectionQuery = `
query IntrospectionQuery {
	__schema {
		queryType { name }
		mutationType { name }
		subscriptionType { name }
		types {
			...FullType
		}
		directives {
			name
			description
			locations
			args {
				...InputValue
			}
		}
	}
}
fragment FullType on __Type {
	kind
	name
	description
	fields(includeDeprecated: true) {
		name
		description
		args {
			...InputValue
		}
		type {
			...TypeRef
		}
		isDeprecated
		deprecationReason
	}
	inputFields {
		...InputValue
	}
	interfaces {
		...TypeRef
	}
	enumValues(includeDeprecated: true) {
		name
		description
		isDeprecated
		deprecationReason
	}
	possibleTypes {
		...TypeRef
	}
}
fragment InputValue on __InputValue {
	name
	description
	type { ...TypeRef }
	defaultValue
}
fragment TypeRef on __Type {
	kind
	name
	ofType {
		kind
		name
		ofType {
			kind
			name
			ofType {
				kind
				name
				ofType {
					kind
					name
					ofType {
						kind
						name
						ofType {
							kind
							name
							ofType {
								kind
								name
							}
						}
					}
				}
			}
		}
	}
}
`
@ -1,319 +0,0 @@
input FloatExpression {
	contained_in:String!
	contains:[Float!]!
	eq:Float!
	equals:Float!
	greater_or_equals:Float!
	greater_than:Float!
	gt:Float!
	gte:Float!
	has_key:Float!
	has_key_all:[Float!]!
	has_key_any:[Float!]!
	ilike:String!
	in:[Float!]!
	is_null:Boolean!
	lesser_or_equals:Float!
	lesser_than:Float!
	like:String!
	lt:Float!
	lte:Float!
	neq:Float!
	nilike:String!
	nin:[Float!]!
	nlike:String!
	not_equals:Float!
	not_ilike:String!
	not_in:[Float!]!
	not_like:String!
	not_similar:String!
	nsimilar:String!
	similar:String!
}
input IntExpression {
	contained_in:String!
	contains:[Int!]!
	eq:Int!
	equals:Int!
	greater_or_equals:Int!
	greater_than:Int!
	gt:Int!
	gte:Int!
	has_key:Int!
	has_key_all:[Int!]!
	has_key_any:[Int!]!
	ilike:String!
	in:[Int!]!
	is_null:Boolean!
	lesser_or_equals:Int!
	lesser_than:Int!
	like:String!
	lt:Int!
	lte:Int!
	neq:Int!
	nilike:String!
	nin:[Int!]!
	nlike:String!
	not_equals:Int!
	not_ilike:String!
	not_in:[Int!]!
	not_like:String!
	not_similar:String!
	nsimilar:String!
	similar:String!
}
type Mutation {
	line_item(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:line_itemInput, update:line_itemInput, upsert:line_itemInput
	):line_itemOutput
	line_items(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:line_itemInput, update:line_itemInput, upsert:line_itemInput, inserts:[line_itemInput!]!, updates:[line_itemInput!]!, upserts:[line_itemInput!]!
	):line_itemOutput
	product(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:productInput, update:productInput, upsert:productInput
	):productOutput
	products(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:productInput, update:productInput, upsert:productInput, inserts:[productInput!]!, updates:[productInput!]!, upserts:[productInput!]!
	):productOutput
	user(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:userInput, update:userInput, upsert:userInput
	):userOutput
	users(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!, insert:userInput, update:userInput, upsert:userInput, inserts:[userInput!]!, updates:[userInput!]!, upserts:[userInput!]!
	):userOutput
}
enum OrderDirection {
	asc
	desc
}
type Query {
	line_item(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):line_itemOutput
	line_items(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:line_itemOrderBy!, where:line_itemExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):[line_itemOutput!]!
	product(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):productOutput
	products(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:productOrderBy!, where:productExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):[productOutput!]!
	user(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):userOutput
	users(
		"To sort or order results just use the order_by argument. This can be combined with where, search, etc. to build complex queries to fit your needs."
		order_by:userOrderBy!, where:userExpression!, limit:Int!, offset:Int!, first:Int!, last:Int!, before:String, after:String,
		"Finds the record by the primary key"
		id:Int!
	):[userOutput!]!
}
input StringExpression {
	contained_in:String!
	contains:[String!]!
	eq:String!
	equals:String!
	greater_or_equals:String!
	greater_than:String!
	gt:String!
	gte:String!
	has_key:String!
	has_key_all:[String!]!
	has_key_any:[String!]!
	ilike:String!
	in:[String!]!
	is_null:Boolean!
	lesser_or_equals:String!
	lesser_than:String!
	like:String!
	lt:String!
	lte:String!
	neq:String!
	nilike:String!
	nin:[String!]!
	nlike:String!
	not_equals:String!
	not_ilike:String!
	not_in:[String!]!
	not_like:String!
	not_similar:String!
	nsimilar:String!
	similar:String!
}
input line_itemExpression {
	and:line_itemExpression!
	id:IntExpression!
	not:line_itemExpression!
	or:line_itemExpression!
	price:FloatExpression!
	product:IntExpression!
	quantity:IntExpression!
}
input line_itemInput {
	id:Int!
	price:Float
	product:Int
	quantity:Int
}
input line_itemOrderBy {
	id:OrderDirection!
	price:OrderDirection!
	product:OrderDirection!
	quantity:OrderDirection!
}
type line_itemOutput {
	avg_id:Int!
	avg_price:Float
	avg_product:Int
	avg_quantity:Int
	count_id:Int!
	count_price:Float
	count_product:Int
	count_quantity:Int
	id:Int!
	max_id:Int!
	max_price:Float
	max_product:Int
	max_quantity:Int
	min_id:Int!
	min_price:Float
	min_product:Int
	min_quantity:Int
	price:Float
	product:Int
	quantity:Int
	stddev_id:Int!
	stddev_pop_id:Int!
	stddev_pop_price:Float
	stddev_pop_product:Int
	stddev_pop_quantity:Int
	stddev_price:Float
	stddev_product:Int
	stddev_quantity:Int
	stddev_samp_id:Int!
	stddev_samp_price:Float
	stddev_samp_product:Int
	stddev_samp_quantity:Int
	var_pop_id:Int!
	var_pop_price:Float
	var_pop_product:Int
	var_pop_quantity:Int
	var_samp_id:Int!
	var_samp_price:Float
	var_samp_product:Int
	var_samp_quantity:Int
	variance_id:Int!
	variance_price:Float
	variance_product:Int
	variance_quantity:Int
}
input productExpression {
	and:productExpression!
	id:IntExpression!
	name:StringExpression!
	not:productExpression!
	or:productExpression!
	weight:FloatExpression!
}
input productInput {
	id:Int!
	name:String
	weight:Float
}
input productOrderBy {
	id:OrderDirection!
	name:OrderDirection!
	weight:OrderDirection!
}
type productOutput {
	avg_id:Int!
	avg_weight:Float
	count_id:Int!
	count_weight:Float
	id:Int!
	max_id:Int!
	max_weight:Float
	min_id:Int!
	min_weight:Float
	name:String
	stddev_id:Int!
	stddev_pop_id:Int!
	stddev_pop_weight:Float
	stddev_samp_id:Int!
	stddev_samp_weight:Float
	stddev_weight:Float
	var_pop_id:Int!
	var_pop_weight:Float
	var_samp_id:Int!
	var_samp_weight:Float
	variance_id:Int!
	variance_weight:Float
	weight:Float
}
input userExpression {
	and:userExpression!
	full_name:StringExpression!
	id:IntExpression!
	not:userExpression!
	or:userExpression!
}
input userInput {
	full_name:String
	id:Int!
}
input userOrderBy {
	full_name:OrderDirection!
	id:OrderDirection!
}
type userOutput {
	avg_id:Int!
	count_id:Int!
	full_name:String
	id:Int!
	max_id:Int!
	min_id:Int!
	stddev_id:Int!
	stddev_pop_id:Int!
	stddev_samp_id:Int!
	var_pop_id:Int!
	var_samp_id:Int!
	variance_id:Int!
}
schema {
	mutation: Mutation
	query: Query
}
@ -1,27 +0,0 @@
package postgresql_test

import (
	"database/sql"
	"os"
	"testing"

	integration_tests "github.com/dosco/super-graph/core/internal/integration_tests"
	_ "github.com/jackc/pgx/v4/stdlib"
	"github.com/stretchr/testify/require"
)

func TestPostgreSQL(t *testing.T) {
	url, found := os.LookupEnv("SG_POSTGRESQL_TEST_URL")
	if !found {
		t.Skip("set the SG_POSTGRESQL_TEST_URL env variable if you want to run integration tests against a PostgreSQL database")
	} else {
		db, err := sql.Open("pgx", url)
		require.NoError(t, err)

		integration_tests.DropSchema(t, db)
		integration_tests.SetupSchema(t, db)
		integration_tests.TestSuperGraph(t, db, func(t *testing.T) {
		})
	}
}
@ -1,214 +0,0 @@
package psql

import (
	"errors"
	"io"
	"strings"

	"github.com/dosco/super-graph/core/internal/qcode"
)

func (c *compilerContext) renderBaseColumns(
	sel *qcode.Select,
	ti *DBTableInfo,
	childCols []*qcode.Column) ([]int, bool, error) {

	var realColsRendered []int

	colcount := (len(sel.Cols) + len(sel.OrderBy) + 1)
	colmap := make(map[string]struct{}, colcount)

	isSearch := sel.Args["search"] != nil
	isCursorPaged := sel.Paging.Type != qcode.PtOffset
	isAgg := false

	i := 0
	for n, col := range sel.Cols {
		cn := col.Name
		colmap[cn] = struct{}{}

		_, isRealCol := ti.ColMap[cn]

		if isRealCol {
			c.renderComma(i)
			realColsRendered = append(realColsRendered, n)
			colWithTable(c.w, ti.Name, cn)

		} else {
			switch {
			case isSearch && cn == "search_rank":
				if err := c.renderColumnSearchRank(sel, ti, col, i); err != nil {
					return nil, false, err
				}

			case isSearch && strings.HasPrefix(cn, "search_headline_"):
				if err := c.renderColumnSearchHeadline(sel, ti, col, i); err != nil {
					return nil, false, err
				}

			case cn == "__typename":
				if err := c.renderColumnTypename(sel, ti, col, i); err != nil {
					return nil, false, err
				}

			case strings.HasSuffix(cn, "_cursor"):
				continue

			default:
				if err := c.renderColumnFunction(sel, ti, col, i); err != nil {
					return nil, false, err
				}

				isAgg = true
			}
		}
		i++
	}

	if isCursorPaged {
		if _, ok := colmap[ti.PrimaryCol.Key]; !ok {
			colmap[ti.PrimaryCol.Key] = struct{}{}
			c.renderComma(i)
			colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
		}
		i++
	}

	for _, ob := range sel.OrderBy {
		if _, ok := colmap[ob.Col]; ok {
			continue
		}
		colmap[ob.Col] = struct{}{}
		c.renderComma(i)
		colWithTable(c.w, ti.Name, ob.Col)
		i++
	}

	for _, col := range childCols {
		if _, ok := colmap[col.Name]; ok {
			continue
		}
		c.renderComma(i)
		colWithTable(c.w, col.Table, col.Name)
		i++
	}

	return realColsRendered, isAgg, nil
}

func (c *compilerContext) renderColumnSearchRank(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
	if isColumnBlocked(sel, col.Name) {
		return nil
	}

	if ti.TSVCol == nil {
		return errors.New("no ts_vector column found")
	}
	cn := ti.TSVCol.Name
	arg := sel.Args["search"]

	c.renderComma(columnsRendered)
	//fmt.Fprintf(w, `ts_rank("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
	//c.sel.Name, cn, arg.Val, col.Name)
	_, _ = io.WriteString(c.w, `ts_rank(`)
	colWithTable(c.w, ti.Name, cn)
	if c.schema.ver >= 110000 {
		_, _ = io.WriteString(c.w, `, websearch_to_tsquery(`)
	} else {
		_, _ = io.WriteString(c.w, `, to_tsquery(`)
	}
	c.md.renderValueExp(c.w, Param{Name: arg.Val, Type: "string"})
	_, _ = io.WriteString(c.w, `))`)
	alias(c.w, col.Name)

	return nil
}

func (c *compilerContext) renderColumnSearchHeadline(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
	cn := col.Name[16:]

	if isColumnBlocked(sel, cn) {
		return nil
	}
	arg := sel.Args["search"]

	c.renderComma(columnsRendered)
	//fmt.Fprintf(w, `ts_headline("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
	//c.sel.Name, cn, arg.Val, col.Name)
	_, _ = io.WriteString(c.w, `ts_headline(`)
	colWithTable(c.w, ti.Name, cn)
	if c.schema.ver >= 110000 {
		_, _ = io.WriteString(c.w, `, websearch_to_tsquery(`)
	} else {
		_, _ = io.WriteString(c.w, `, to_tsquery(`)
	}
	c.md.renderValueExp(c.w, Param{Name: arg.Val, Type: "string"})
	_, _ = io.WriteString(c.w, `))`)
	alias(c.w, col.Name)

	return nil
}

func (c *compilerContext) renderColumnTypename(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
	if isColumnBlocked(sel, col.Name) {
		return nil
	}

	c.renderComma(columnsRendered)
	_, _ = io.WriteString(c.w, `(`)
	squoted(c.w, ti.Name)
	_, _ = io.WriteString(c.w, ` :: text)`)
	alias(c.w, col.Name)

	return nil
}

func (c *compilerContext) renderColumnFunction(sel *qcode.Select, ti *DBTableInfo, col qcode.Column, columnsRendered int) error {
	pl := funcPrefixLen(c.schema.fm, col.Name)
	// if pl == 0 {
	// 	//fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
	// 	_, _ = io.WriteString(c.w, `'`)
	// 	_, _ = io.WriteString(c.w, col.Name)
	// 	_, _ = io.WriteString(c.w, ` not defined'`)
	// 	alias(c.w, col.Name)
	// }

	if pl == 0 || !sel.Functions {
		return nil
	}

	cn := col.Name[pl:]

	if isColumnBlocked(sel, cn) {
		return nil
	}

	fn := col.Name[:pl-1]

	c.renderComma(columnsRendered)

	//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Name, cn, col.Name)
	_, _ = io.WriteString(c.w, fn)
	_, _ = io.WriteString(c.w, `(`)
	colWithTable(c.w, ti.Name, cn)
	_, _ = io.WriteString(c.w, `)`)
	alias(c.w, col.Name)

	return nil
}

func (c *compilerContext) renderComma(columnsRendered int) {
	if columnsRendered != 0 {
		_, _ = io.WriteString(c.w, `, `)
	}
}

func isColumnBlocked(sel *qcode.Select, name string) bool {
	if len(sel.Allowed) != 0 {
		if _, ok := sel.Allowed[name]; !ok {
			return true
		}
	}
	return false
}
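To make the writer calls above concrete, here is the fragment shape renderColumnSearchRank emits on PostgreSQL 11 and newer; the table and column names are illustrative, and $1 stands for whatever renderValueExp emits for the search argument.

// Illustrative only: shape of the search_rank select expression.
const searchRankShape = `ts_rank("products"."tsv", websearch_to_tsquery($1)) AS "search_rank"`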
@ -1,134 +0,0 @@
// +build gofuzz

package psql

import (
	"encoding/json"

	"github.com/dosco/super-graph/core/internal/qcode"
)

var (
	qcompileTest, _ = qcode.NewCompiler(qcode.Config{})

	schema, _ = GetTestSchema()

	vars = map[string]string{
		"admin_account_id": "5",
	}

	pcompileTest = NewCompiler(Config{
		Schema: schema,
		Vars:   vars,
	})
)

// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
	err1 := query(data)
	err2 := insert(data)
	err3 := update(data)
	err4 := delete(data)

	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
		return 0
	}

	return 1
}

func query(data []byte) error {
	gql := data

	qc, err1 := qcompileTest.Compile(gql, "user")

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(data),
	}

	_, _, err2 := pcompileTest.CompileEx(qc, vars)

	if err1 != nil {
		return err1
	} else {
		return err2
	}
}

func insert(data []byte) error {
	gql := `mutation {
		product(insert: $data) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	qc, err := qcompileTest.Compile([]byte(gql), "user")
	if err != nil {
		panic("qcompile can't fail")
	}

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(data),
	}

	_, _, err = pcompileTest.CompileEx(qc, vars)
	return err
}

func update(data []byte) error {
	gql := `mutation {
		product(id: $id, update: $data) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	qc, err := qcompileTest.Compile([]byte(gql), "user")
	if err != nil {
		panic("qcompile can't fail")
	}

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(data),
	}

	_, _, err = pcompileTest.CompileEx(qc, vars)
	return err
}

func delete(data []byte) error {
	gql := `mutation {
		product(id: $id, delete: true) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	qc, err := qcompileTest.Compile([]byte(gql), "user")
	if err != nil {
		panic("qcompile can't fail")
	}

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(data),
	}

	_, _, err = pcompileTest.CompileEx(qc, vars)
	return err
}
@ -1,20 +0,0 @@
// +build gofuzz

package psql

import (
	"testing"
)

var ret int

func TestFuzzCrashers(t *testing.T) {
	var crashers = []string{
		"{\"connect\":{}}",
		"q(q{q{q{q{q{q{q{q{",
	}

	for _, f := range crashers {
		ret = Fuzz([]byte(f))
	}
}
@ -1,186 +0,0 @@
|
||||
//nolint:errcheck
|
||||
package psql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
)
|
||||
|
||||
func (c *compilerContext) renderInsert(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
insert, ok := vars[qc.ActionVar]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
|
||||
}
|
||||
if len(insert) == 0 {
|
||||
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT `)
|
||||
if insert[0] == '[' {
|
||||
io.WriteString(c.w, `json_array_elements(`)
|
||||
}
|
||||
c.md.renderValueExp(c.w, Param{Name: qc.ActionVar, Type: "json"})
|
||||
io.WriteString(c.w, ` :: json`)
|
||||
if insert[0] == '[' {
|
||||
io.WriteString(c.w, `)`)
|
||||
}
|
||||
io.WriteString(c.w, ` AS j)`)
|
||||
|
||||
st := util.NewStack()
|
||||
st.Push(kvitem{_type: itemInsert, key: ti.Name, val: insert, ti: ti})
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
if insert[0] == '[' && st.Len() > 1 {
|
||||
return 0, errors.New("Nested bulk insert not supported")
|
||||
}
|
||||
intf := st.Pop()
|
||||
|
||||
switch item := intf.(type) {
|
||||
case kvitem:
|
||||
if err := c.handleKVItem(st, item); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
case renitem:
|
||||
var err error
|
||||
|
||||
// if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
|
||||
// io.WriteString(c.w, ` WHERE false`)
|
||||
// }
|
||||
|
||||
switch item._type {
|
||||
case itemInsert:
|
||||
err = c.renderInsertStmt(qc, w, item)
|
||||
case itemConnect:
|
||||
err = c.renderConnectStmt(qc, w, item)
|
||||
case itemUnion:
|
||||
err = c.renderUnionStmt(w, item)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
io.WriteString(c.w, ` `)
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderInsertStmt(qc *qcode.QCode, w io.Writer, item renitem) error {
|
||||
|
||||
ti := item.ti
|
||||
jt := item.data
|
||||
sk := nestedInsertRelColumnsMap(item.kvitem)
|
||||
|
||||
io.WriteString(c.w, `, `)
|
||||
renderCteName(w, item.kvitem)
|
||||
io.WriteString(w, ` AS (`)
|
||||
|
||||
io.WriteString(w, `INSERT INTO `)
|
||||
quoted(w, ti.Name)
|
||||
io.WriteString(w, ` (`)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, false)
|
||||
renderNestedInsertRelColumns(w, item.kvitem, false)
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
io.WriteString(w, ` SELECT `)
|
||||
c.renderInsertUpdateColumns(qc, jt, ti, sk, true)
|
||||
renderNestedInsertRelColumns(w, item.kvitem, true)
|
||||
|
||||
io.WriteString(w, ` FROM "_sg_input" i`)
|
||||
renderNestedInsertRelTables(w, item.kvitem)
|
||||
io.WriteString(w, ` RETURNING *)`)
|
||||
return nil
|
||||
}
|
||||
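// Rough shape of the CTE renderInsertStmt appends (a sketch assuming a
// "users" table with a "full_name" column of type character varying; not
// verbatim output):
//
//   , "users" AS (INSERT INTO "users" ("full_name")
//       SELECT CAST( i.j ->> 'full_name' AS character varying)
//       FROM "_sg_input" i RETURNING *)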
|
||||
func nestedInsertRelColumnsMap(item kvitem) map[string]struct{} {
|
||||
sk := make(map[string]struct{}, len(item.items))
|
||||
|
||||
if len(item.items) == 0 {
|
||||
if item.relPC != nil && item.relPC.Type == RelOneToMany {
|
||||
sk[item.relPC.Right.Col] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
for _, v := range item.items {
|
||||
if v.relCP.Type == RelOneToMany {
|
||||
sk[v.relCP.Right.Col] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sk
|
||||
}
|
||||
|
||||
func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
|
||||
if len(item.items) == 0 {
|
||||
if item.relPC != nil && item.relPC.Type == RelOneToMany {
|
||||
if values {
|
||||
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
|
||||
} else {
|
||||
quoted(w, item.relPC.Right.Col)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Render child foreign key columns if child-to-parent
|
||||
// relationship is one-to-many
|
||||
i := 0
|
||||
for _, v := range item.items {
|
||||
if v.relCP.Type == RelOneToMany {
|
||||
if i != 0 {
|
||||
io.WriteString(w, `, `)
|
||||
}
|
||||
if values {
|
||||
if v._ctype > 0 {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `".`)
|
||||
quoted(w, v.relCP.Left.Col)
|
||||
} else {
|
||||
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
|
||||
}
|
||||
} else {
|
||||
quoted(w, v.relCP.Right.Col)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
|
||||
if len(item.items) == 0 {
|
||||
if item.relPC != nil && item.relPC.Type == RelOneToMany {
|
||||
io.WriteString(w, `, `)
|
||||
quoted(w, item.relPC.Left.Table)
|
||||
}
|
||||
} else {
|
||||
// Render tables needed to set values if child-to-parent
|
||||
// relationship is one-to-many
|
||||
for _, v := range item.items {
|
||||
if v.relCP.Type == RelOneToMany {
|
||||
io.WriteString(w, `, `)
|
||||
if v._ctype > 0 {
|
||||
io.WriteString(w, `"_x_`)
|
||||
io.WriteString(w, v.relCP.Left.Table)
|
||||
io.WriteString(w, `"`)
|
||||
} else {
|
||||
quoted(w, v.relCP.Left.Table)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -1,271 +0,0 @@
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func simpleInsert(t *testing.T) {
|
||||
gql := `mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
func singleInsert(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(id: $id, insert: $insert) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"insert": json.RawMessage(` { "name": "my_name", "price": 6.95, "description": "my_desc", "user_id": 5 }`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "anon")
|
||||
}
|
||||
|
||||
func bulkInsert(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(name: "test", id: $id, insert: $insert) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"insert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "anon")
|
||||
}
|
||||
|
||||
func simpleInsertWithPresets(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{"name": "Tomato", "price": 5.76}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
func nestedInsertManyToMany(t *testing.T) {
|
||||
gql := `mutation {
|
||||
purchase(insert: $data) {
|
||||
sale_type
|
||||
quantity
|
||||
due_date
|
||||
customer {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(` {
|
||||
"sale_type": "bought",
|
||||
"quantity": 5,
|
||||
"due_date": "now",
|
||||
"customer": {
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude"
|
||||
},
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25
|
||||
}
|
||||
}
|
||||
`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func nestedInsertOneToMany(t *testing.T) {
|
||||
gql := `mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func nestedInsertOneToOne(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"hey": {
|
||||
"now": "what's the matter"
|
||||
},
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now"
|
||||
}
|
||||
}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func nestedInsertOneToManyWithConnect(t *testing.T) {
|
||||
gql := `mutation {
|
||||
user(insert: $data) {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
product {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{
|
||||
"email": "thedude@rug.com",
|
||||
"full_name": "The Dude",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"product": {
|
||||
"connect": { "id": 5 }
|
||||
}
|
||||
}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func nestedInsertOneToOneWithConnect(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
tags {
|
||||
id
|
||||
name
|
||||
}
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": { "id": 5 }
|
||||
}
|
||||
}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func nestedInsertOneToOneWithConnectArray(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(insert: $data) {
|
||||
id
|
||||
name
|
||||
user {
|
||||
id
|
||||
full_name
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"data": json.RawMessage(`{
|
||||
"name": "Apple",
|
||||
"price": 1.25,
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
"user": {
|
||||
"connect": { "id": [1,2] }
|
||||
}
|
||||
}`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "admin")
|
||||
}
|
||||
|
||||
func TestCompileInsert(t *testing.T) {
|
||||
t.Run("simpleInsert", simpleInsert)
|
||||
t.Run("singleInsert", singleInsert)
|
||||
t.Run("bulkInsert", bulkInsert)
|
||||
t.Run("simpleInsertWithPresets", simpleInsertWithPresets)
|
||||
t.Run("nestedInsertManyToMany", nestedInsertManyToMany)
|
||||
t.Run("nestedInsertOneToMany", nestedInsertOneToMany)
|
||||
t.Run("nestedInsertOneToOne", nestedInsertOneToOne)
|
||||
t.Run("nestedInsertOneToManyWithConnect", nestedInsertOneToManyWithConnect)
|
||||
t.Run("nestedInsertOneToOneWithConnect", nestedInsertOneToOneWithConnect)
|
||||
t.Run("nestedInsertOneToOneWithConnectArray", nestedInsertOneToOneWithConnectArray)
|
||||
}
|
@ -1,61 +0,0 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
func (md *Metadata) RenderVar(w io.Writer, vv string) {
|
||||
f, s := -1, 0
|
||||
|
||||
for i := range vv {
|
||||
v := vv[i]
|
||||
switch {
|
||||
case v == '$' && (i == 0 || vv[i-1] != '\\'):
|
||||
if (i - s) > 0 {
|
||||
_, _ = io.WriteString(w, vv[s:i])
|
||||
}
|
||||
f = i
|
||||
|
||||
case (v < 'a' || v > 'z') &&
|
||||
(v < 'A' || v > 'Z') &&
|
||||
(v < '0' || v > '9') &&
|
||||
v != '_' &&
|
||||
f != -1 &&
|
||||
(i-f) > 1:
|
||||
md.renderValueExp(w, Param{Name: vv[f+1 : i]})
|
||||
s = i
|
||||
f = -1
|
||||
}
|
||||
}
|
||||
|
||||
if f != -1 && (len(vv)-f) > 1 {
|
||||
md.renderValueExp(w, Param{Name: vv[f+1:]})
|
||||
} else {
|
||||
_, _ = io.WriteString(w, vv[s:])
|
||||
}
|
||||
}
|
||||
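// Example of the scan above (a sketch): given
//   vv = "select * from users where id = $user_id"
// RenderVar copies the literal text through unchanged and hands the
// "user_id" token to renderValueExp, producing
//   select * from users where id = $1
// with "user_id" recorded in md.params for the caller to bind.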
|
||||
func (md *Metadata) renderValueExp(w io.Writer, p Param) {
|
||||
_, _ = io.WriteString(w, `$`)
|
||||
if v, ok := md.pindex[p.Name]; ok {
|
||||
int32String(w, int32(v))
|
||||
|
||||
} else {
|
||||
md.params = append(md.params, p)
|
||||
n := len(md.params)
|
||||
|
||||
if md.pindex == nil {
|
||||
md.pindex = make(map[string]int)
|
||||
}
|
||||
md.pindex[p.Name] = n
|
||||
int32String(w, int32(n))
|
||||
}
|
||||
}
|
||||
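// renderValueExp deduplicates named parameters: the first occurrence of a
// name is appended to md.params and assigned the next ordinal, and any later
// occurrence of the same name re-renders the same $N via the pindex lookup.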
|
||||
func (md Metadata) Skipped() uint32 {
|
||||
return md.skipped
|
||||
}
|
||||
|
||||
func (md Metadata) Params() []Param {
|
||||
return md.params
|
||||
}
|
@ -1,718 +0,0 @@
|
||||
//nolint:errcheck
|
||||
package psql
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
"github.com/dosco/super-graph/jsn"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemInsert itemType = iota + 1
|
||||
itemUpdate
|
||||
itemConnect
|
||||
itemDisconnect
|
||||
itemUnion
|
||||
)
|
||||
|
||||
var insertTypes = map[string]itemType{
|
||||
"connect": itemConnect,
|
||||
}
|
||||
|
||||
var updateTypes = map[string]itemType{
|
||||
"connect": itemConnect,
|
||||
"disconnect": itemDisconnect,
|
||||
}
|
||||
|
||||
var noLimit = qcode.Paging{NoLimit: true}
|
||||
|
||||
func (co *Compiler) compileMutation(w io.Writer, qc *qcode.QCode, vars Variables) (Metadata, error) {
|
||||
md := Metadata{}
|
||||
|
||||
if len(qc.Selects) == 0 {
|
||||
return md, errors.New("empty query")
|
||||
}
|
||||
|
||||
c := &compilerContext{md, w, qc.Selects, co}
|
||||
root := &qc.Selects[0]
|
||||
|
||||
ti, err := c.schema.GetTable(root.Name)
|
||||
if err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
switch qc.Type {
|
||||
case qcode.QTInsert:
|
||||
if _, err := c.renderInsert(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTUpdate:
|
||||
if _, err := c.renderUpdate(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTUpsert:
|
||||
if _, err := c.renderUpsert(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
case qcode.QTDelete:
|
||||
if _, err := c.renderDelete(w, qc, vars, ti); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
default:
|
||||
return c.md, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
|
||||
}
|
||||
|
||||
root.Paging = noLimit
|
||||
root.DistinctOn = root.DistinctOn[:0]
|
||||
root.OrderBy = root.OrderBy[:0]
|
||||
root.Where = nil
|
||||
root.Args = nil
|
||||
|
||||
return co.compileQueryWithMetadata(w, qc, vars, c.md)
|
||||
}
|
||||
|
||||
type kvitem struct {
|
||||
id int32
|
||||
_type itemType
|
||||
_ctype int
|
||||
key string
|
||||
path []string
|
||||
val json.RawMessage
|
||||
data map[string]json.RawMessage
|
||||
array bool
|
||||
ti *DBTableInfo
|
||||
relCP *DBRel
|
||||
relPC *DBRel
|
||||
items []kvitem
|
||||
}
|
||||
|
||||
type renitem struct {
|
||||
kvitem
|
||||
array bool
|
||||
data map[string]json.RawMessage
|
||||
}
|
||||
|
||||
// TODO: Handle cases where a column name matches the child table name
|
||||
// the child path needs to be excluded in the json sent to insert or update
|
||||
|
||||
func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
|
||||
var data map[string]json.RawMessage
|
||||
var array bool
|
||||
var err error
|
||||
|
||||
if item.data == nil {
|
||||
data, array, err = jsn.Tree(item.val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
data, array = item.data, item.array
|
||||
}
|
||||
|
||||
var unionize bool
|
||||
id := item.id + 1
|
||||
|
||||
item.items = make([]kvitem, 0, len(data))
|
||||
|
||||
for k, v := range data {
|
||||
if v[0] != '{' && v[0] != '[' {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get child-to-parent relationship
|
||||
relCP, err := c.schema.GetRel(k, item.key)
|
||||
if err != nil {
|
||||
var ty itemType
|
||||
var ok bool
|
||||
|
||||
switch item._type {
|
||||
case itemInsert:
|
||||
ty, ok = insertTypes[k]
|
||||
case itemUpdate:
|
||||
ty, ok = updateTypes[k]
|
||||
}
|
||||
|
||||
if ok {
|
||||
unionize = true
|
||||
item1 := item
|
||||
item1._type = ty
|
||||
item1.id = id
|
||||
item1.val = v
|
||||
|
||||
item.items = append(item.items, item1)
|
||||
id++
|
||||
}
|
||||
|
||||
// Get parent-to-child relationship
|
||||
} else if relPC, err := c.schema.GetRel(item.key, k); err == nil {
|
||||
ti, err := c.schema.GetTable(k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
item1 := kvitem{
|
||||
id: id,
|
||||
_type: item._type,
|
||||
key: k,
|
||||
val: v,
|
||||
path: append(item.path, k),
|
||||
ti: ti,
|
||||
relCP: relCP,
|
||||
relPC: relPC,
|
||||
}
|
||||
|
||||
if v[0] == '{' {
|
||||
item1.data, item1.array, err = jsn.Tree(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v1, ok := item1.data["connect"]; ok && (v1[0] == '{' || v1[0] == '[') {
|
||||
item1._ctype |= (1 << itemConnect)
|
||||
}
|
||||
if v1, ok := item1.data["disconnect"]; ok && (v1[0] == '{' || v1[0] == '[') {
|
||||
item1._ctype |= (1 << itemDisconnect)
|
||||
}
|
||||
}
|
||||
|
||||
item.items = append(item.items, item1)
|
||||
id++
|
||||
}
|
||||
}
|
||||
|
||||
if unionize {
|
||||
item._type = itemUnion
|
||||
}
|
||||
|
||||
// For inserts order the children according to
|
||||
// the creation order required by the parent-to-child
|
||||
// relationships. For example users need to be created
|
||||
// before the products they own.
|
||||
|
||||
// For updates the order defined in the query must be
|
||||
// the order used.
|
||||
switch item._type {
|
||||
case itemInsert:
|
||||
for _, v := range item.items {
|
||||
if v.relPC.Type == RelOneToMany {
|
||||
st.Push(v)
|
||||
}
|
||||
}
|
||||
st.Push(renitem{kvitem: item, array: array, data: data})
|
||||
for _, v := range item.items {
|
||||
if v.relPC.Type == RelOneToOne {
|
||||
st.Push(v)
|
||||
}
|
||||
}
|
||||
|
||||
case itemUpdate:
|
||||
for _, v := range item.items {
|
||||
if !(v._ctype > 0 && v.relPC.Type == RelOneToOne) {
|
||||
st.Push(v)
|
||||
}
|
||||
}
|
||||
st.Push(renitem{kvitem: item, array: array, data: data})
|
||||
for _, v := range item.items {
|
||||
if v._ctype > 0 && v.relPC.Type == RelOneToOne {
|
||||
st.Push(v)
|
||||
}
|
||||
}
|
||||
|
||||
case itemUnion:
|
||||
st.Push(renitem{kvitem: item, array: array, data: data})
|
||||
for _, v := range item.items {
|
||||
st.Push(v)
|
||||
}
|
||||
|
||||
default:
|
||||
for _, v := range item.items {
|
||||
st.Push(v)
|
||||
}
|
||||
st.Push(renitem{kvitem: item, array: array, data: data})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
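// Effect of the push order above (a sketch): the stack is LIFO, so for
// inserts the one-to-one children pop first and their CTEs are rendered
// before the parent's (the child row must exist so its key can be copied
// into the parent), while one-to-many children are rendered after it.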
|
||||
func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
|
||||
var connect, disconnect bool
|
||||
|
||||
// Render only for parent-to-child relationship of one-to-many
|
||||
if item.relPC.Type != RelOneToMany {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, v := range item.items {
|
||||
if v._type == itemConnect {
|
||||
connect = true
|
||||
} else if v._type == itemDisconnect {
|
||||
disconnect = true
|
||||
}
|
||||
if connect && disconnect {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if connect {
|
||||
io.WriteString(w, `, `)
|
||||
if connect && disconnect {
|
||||
renderCteNameWithSuffix(w, item.kvitem, "c")
|
||||
} else {
|
||||
quoted(w, item.ti.Name)
|
||||
}
|
||||
io.WriteString(w, ` AS ( UPDATE `)
|
||||
quoted(w, item.ti.Name)
|
||||
io.WriteString(w, ` SET `)
|
||||
quoted(w, item.relPC.Right.Col)
|
||||
io.WriteString(w, ` = `)
|
||||
|
||||
// When setting the id of the connected table in a one-to-many setting
|
||||
// we always overwrite the value including for array columns
|
||||
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
|
||||
|
||||
io.WriteString(w, ` FROM `)
|
||||
quoted(w, item.relPC.Left.Table)
|
||||
io.WriteString(w, ` WHERE`)
|
||||
|
||||
i := 0
|
||||
for _, v := range item.items {
|
||||
if v._type == itemConnect {
|
||||
if i != 0 {
|
||||
io.WriteString(w, ` OR (`)
|
||||
} else {
|
||||
io.WriteString(w, ` (`)
|
||||
}
|
||||
if err := renderWhereFromJSON(w, v, "connect", v.val); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
i++
|
||||
}
|
||||
}
|
||||
io.WriteString(w, ` RETURNING `)
|
||||
quoted(w, item.ti.Name)
|
||||
io.WriteString(w, `.*)`)
|
||||
}
|
||||
|
||||
if disconnect {
|
||||
io.WriteString(w, `, `)
|
||||
if connect && disconnect {
|
||||
renderCteNameWithSuffix(w, item.kvitem, "d")
|
||||
} else {
|
||||
quoted(w, item.ti.Name)
|
||||
}
|
||||
io.WriteString(w, ` AS ( UPDATE `)
|
||||
quoted(w, item.ti.Name)
|
||||
io.WriteString(w, ` SET `)
|
||||
quoted(w, item.relPC.Right.Col)
|
||||
io.WriteString(w, ` = `)
|
||||
|
||||
if item.relPC.Right.Array {
|
||||
io.WriteString(w, ` array_remove(`)
|
||||
quoted(w, item.relPC.Right.Col)
|
||||
io.WriteString(w, `, `)
|
||||
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
} else {
|
||||
io.WriteString(w, ` NULL`)
|
||||
}
|
||||
|
||||
io.WriteString(w, ` FROM `)
|
||||
quoted(w, item.relPC.Left.Table)
|
||||
io.WriteString(w, ` WHERE`)
|
||||
|
||||
i := 0
|
||||
for _, v := range item.items {
|
||||
if v._type == itemDisconnect {
|
||||
if i != 0 {
|
||||
io.WriteString(w, ` OR (`)
|
||||
} else {
|
||||
io.WriteString(w, ` (`)
|
||||
}
|
||||
if err := renderWhereFromJSON(w, v, "disconnect", v.val); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
i++
|
||||
}
|
||||
}
|
||||
io.WriteString(w, ` RETURNING `)
|
||||
quoted(w, item.ti.Name)
|
||||
io.WriteString(w, `.*)`)
|
||||
}
|
||||
|
||||
if connect && disconnect {
|
||||
io.WriteString(w, `, `)
|
||||
quoted(w, item.ti.Name)
|
||||
io.WriteString(w, ` AS (`)
|
||||
io.WriteString(w, `SELECT * FROM `)
|
||||
renderCteNameWithSuffix(w, item.kvitem, "c")
|
||||
io.WriteString(w, ` UNION ALL `)
|
||||
io.WriteString(w, `SELECT * FROM `)
|
||||
renderCteNameWithSuffix(w, item.kvitem, "d")
|
||||
io.WriteString(w, `)`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
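// Sketch (assuming a child table "tags"): when both connect and disconnect
// items are present this renders two UPDATE CTEs named "tags_c" and
// "tags_d", then merges them so later statements see a single CTE:
//
//   , "tags" AS (SELECT * FROM "tags_c" UNION ALL SELECT * FROM "tags_d")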
|
||||
func (c *compilerContext) renderInsertUpdateColumns(
|
||||
qc *qcode.QCode,
|
||||
jt map[string]json.RawMessage,
|
||||
ti *DBTableInfo,
|
||||
skipcols map[string]struct{},
|
||||
isValues bool) (uint32, error) {
|
||||
|
||||
root := &qc.Selects[0]
|
||||
renderedCol := false
|
||||
|
||||
n := 0
|
||||
for _, cn := range ti.Columns {
|
||||
if _, ok := skipcols[cn.Name]; ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := jt[cn.Key]; !ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := root.PresetMap[cn.Key]; ok {
|
||||
continue
|
||||
}
|
||||
if len(root.Allowed) != 0 {
|
||||
if _, ok := root.Allowed[cn.Key]; !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if n != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
if isValues {
|
||||
io.WriteString(c.w, `CAST( i.j ->>`)
|
||||
io.WriteString(c.w, `'`)
|
||||
io.WriteString(c.w, cn.Name)
|
||||
io.WriteString(c.w, `' AS `)
|
||||
io.WriteString(c.w, cn.Type)
|
||||
io.WriteString(c.w, `)`)
|
||||
} else {
|
||||
quoted(c.w, cn.Name)
|
||||
}
|
||||
|
||||
if !renderedCol {
|
||||
renderedCol = true
|
||||
}
|
||||
n++
|
||||
}
|
||||
|
||||
for i := range root.PresetList {
|
||||
cn := root.PresetList[i]
|
||||
col, ok := ti.ColMap[cn]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := skipcols[col.Name]; ok {
|
||||
continue
|
||||
}
|
||||
if i != 0 || n != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
if isValues {
|
||||
val := root.PresetMap[cn]
|
||||
switch {
|
||||
case ok && len(val) > 1 && val[0] == '$':
|
||||
c.md.renderValueExp(c.w, Param{Name: val[1:], Type: col.Type})
|
||||
|
||||
case ok && strings.HasPrefix(val, "sql:"):
|
||||
io.WriteString(c.w, `(`)
|
||||
c.md.RenderVar(c.w, val[4:])
|
||||
io.WriteString(c.w, `)`)
|
||||
|
||||
case ok:
|
||||
squoted(c.w, val)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` :: `)
|
||||
io.WriteString(c.w, col.Type)
|
||||
} else {
|
||||
quoted(c.w, cn)
|
||||
}
|
||||
|
||||
if !renderedCol {
|
||||
renderedCol = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(skipcols) != 0 && renderedCol {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderUpsert(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {
|
||||
|
||||
root := &qc.Selects[0]
|
||||
upsert, ok := vars[qc.ActionVar]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
|
||||
}
|
||||
if len(upsert) == 0 {
|
||||
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
|
||||
}
|
||||
|
||||
if ti.PrimaryCol == nil {
|
||||
return 0, fmt.Errorf("no primary key column found")
|
||||
}
|
||||
|
||||
jt, _, err := jsn.Tree(upsert)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if _, err := c.renderInsert(w, qc, vars, ti); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` ON CONFLICT (`)
|
||||
i := 0
|
||||
|
||||
for _, cn := range ti.Columns {
|
||||
if _, ok := jt[cn.Key]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if col, ok := ti.ColMap[cn.Key]; !ok || !(col.UniqueKey || col.PrimaryKey) {
|
||||
continue
|
||||
}
|
||||
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
io.WriteString(c.w, cn.Name)
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
io.WriteString(c.w, ti.PrimaryCol.Name)
|
||||
}
|
||||
io.WriteString(c.w, `)`)
|
||||
|
||||
if root.Where != nil {
|
||||
io.WriteString(c.w, ` WHERE `)
|
||||
|
||||
if err := c.renderWhere(root, ti); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` DO UPDATE SET `)
|
||||
|
||||
i = 0
|
||||
for _, cn := range ti.Columns {
|
||||
if _, ok := jt[cn.Key]; !ok {
|
||||
continue
|
||||
}
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
io.WriteString(c.w, cn.Name)
|
||||
io.WriteString(c.w, ` = EXCLUDED.`)
|
||||
io.WriteString(c.w, cn.Name)
|
||||
i++
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` RETURNING *) `)
|
||||
|
||||
return 0, nil
|
||||
}
|
||||
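// Sketch of the suffix renderUpsert appends after the insert CTE (assuming a
// "products" table whose payload carries the primary key "id" and a "name"
// column; not verbatim output):
//
//   ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name RETURNING *)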
|
||||
func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
|
||||
item renitem) error {
|
||||
|
||||
rel := item.relPC
|
||||
|
||||
if rel == nil {
|
||||
return errors.New("invalid connect value")
|
||||
}
|
||||
|
||||
// Render only for parent-to-child relationship of one-to-one
|
||||
// For this to work the child needs to be found first so its primary key
|
||||
// can be set in the related column on the parent object.
|
||||
// Eg. Create product and connect a user to it.
|
||||
if rel.Type != RelOneToOne {
|
||||
return nil
|
||||
}
|
||||
|
||||
io.WriteString(w, `, "_x_`)
|
||||
io.WriteString(c.w, item.ti.Name)
|
||||
io.WriteString(c.w, `" AS (SELECT `)
|
||||
|
||||
if rel.Left.Array {
|
||||
io.WriteString(w, `array_agg(DISTINCT `)
|
||||
quoted(w, rel.Right.Col)
|
||||
io.WriteString(w, `) AS `)
|
||||
quoted(w, rel.Right.Col)
|
||||
|
||||
} else {
|
||||
quoted(w, rel.Right.Col)
|
||||
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` FROM "_sg_input" i,`)
|
||||
quoted(c.w, item.ti.Name)
|
||||
|
||||
io.WriteString(c.w, ` WHERE `)
|
||||
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(c.w, ` LIMIT 1)`)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
|
||||
item renitem) error {
|
||||
|
||||
rel := item.relPC
|
||||
|
||||
// Render only for parent-to-child relationship of one-to-one
|
||||
// For this to work the child needs to be found first so its
|
||||
// null value can be set in the related column on the parent object.
|
||||
// Eg. Update product and disconnect the user from it.
|
||||
if rel.Type != RelOneToOne {
|
||||
return nil
|
||||
}
|
||||
io.WriteString(w, `, "_x_`)
|
||||
io.WriteString(c.w, item.ti.Name)
|
||||
io.WriteString(c.w, `" AS (`)
|
||||
|
||||
if rel.Right.Array {
|
||||
io.WriteString(c.w, `SELECT `)
|
||||
quoted(w, rel.Right.Col)
|
||||
io.WriteString(c.w, ` FROM "_sg_input" i,`)
|
||||
quoted(c.w, item.ti.Name)
|
||||
io.WriteString(c.w, ` WHERE `)
|
||||
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(c.w, ` LIMIT 1))`)
|
||||
|
||||
} else {
|
||||
io.WriteString(c.w, `SELECT * FROM (VALUES(NULL::`)
|
||||
io.WriteString(w, rel.Right.col.Type)
|
||||
io.WriteString(c.w, `)) AS LOOKUP(`)
|
||||
quoted(w, rel.Right.Col)
|
||||
io.WriteString(c.w, `))`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderWhereFromJSON(w io.Writer, item kvitem, key string, val []byte) error {
|
||||
var kv map[string]json.RawMessage
|
||||
ti := item.ti
|
||||
|
||||
if err := json.Unmarshal(val, &kv); err != nil {
|
||||
return err
|
||||
}
|
||||
i := 0
|
||||
for k, v := range kv {
|
||||
col, ok := ti.ColMap[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if i != 0 {
|
||||
io.WriteString(w, ` AND `)
|
||||
}
|
||||
|
||||
if v[0] == '[' {
|
||||
colWithTable(w, ti.Name, k)
|
||||
|
||||
if col.Array {
|
||||
io.WriteString(w, ` && `)
|
||||
} else {
|
||||
io.WriteString(w, ` = `)
|
||||
}
|
||||
|
||||
io.WriteString(w, `ANY((select a::`)
|
||||
io.WriteString(w, col.Type)
|
||||
|
||||
io.WriteString(w, ` AS list from json_array_elements_text(`)
|
||||
renderPathJSON(w, item, key, k)
|
||||
io.WriteString(w, `::json) AS a))`)
|
||||
|
||||
} else if col.Array {
|
||||
io.WriteString(w, `(`)
|
||||
renderPathJSON(w, item, key, k)
|
||||
io.WriteString(w, `)::`)
|
||||
io.WriteString(w, col.Type)
|
||||
|
||||
io.WriteString(w, ` = ANY(`)
|
||||
colWithTable(w, ti.Name, k)
|
||||
io.WriteString(w, `)`)
|
||||
|
||||
} else {
|
||||
colWithTable(w, ti.Name, k)
|
||||
|
||||
io.WriteString(w, `= (`)
|
||||
renderPathJSON(w, item, key, k)
|
||||
io.WriteString(w, `)::`)
|
||||
io.WriteString(w, col.Type)
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
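// Example (a sketch, assuming a nested path ["user"] and a bigint "id"
// column): a connect value of {"id": 5} renders roughly
//   "users"."id" = ((i.j->'user'->'connect'->>'id'))::bigint
// while {"id": [1,2]} takes the ANY(...) branch above instead.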
|
||||
func renderPathJSON(w io.Writer, item kvitem, key1, key2 string) {
|
||||
io.WriteString(w, `(i.j->`)
|
||||
joinPath(w, item.path)
|
||||
io.WriteString(w, `->'`)
|
||||
io.WriteString(w, key1)
|
||||
io.WriteString(w, `'->>'`)
|
||||
io.WriteString(w, key2)
|
||||
io.WriteString(w, `')`)
|
||||
}
|
||||
|
||||
func renderCteName(w io.Writer, item kvitem) error {
|
||||
io.WriteString(w, `"`)
|
||||
io.WriteString(w, item.ti.Name)
|
||||
if item._type == itemConnect || item._type == itemDisconnect {
|
||||
io.WriteString(w, `_`)
|
||||
int32String(w, item.id)
|
||||
}
|
||||
io.WriteString(w, `"`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func renderCteNameWithSuffix(w io.Writer, item kvitem, suffix string) error {
|
||||
io.WriteString(w, `"`)
|
||||
io.WriteString(w, item.ti.Name)
|
||||
io.WriteString(w, `_`)
|
||||
io.WriteString(w, suffix)
|
||||
io.WriteString(w, `"`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func joinPath(w io.Writer, path []string) {
|
||||
for i := range path {
|
||||
if i != 0 {
|
||||
io.WriteString(w, `->`)
|
||||
}
|
||||
io.WriteString(w, `'`)
|
||||
io.WriteString(w, path[i])
|
||||
io.WriteString(w, `'`)
|
||||
}
|
||||
}
|
@ -1,123 +0,0 @@
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func singleUpsert(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(upsert: $upsert) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
func singleUpsertWhere(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(upsert: $upsert, where: { price : { gt: 3 } }) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
func bulkUpsert(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(upsert: $upsert) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"upsert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
func delete(t *testing.T) {
|
||||
gql := `mutation {
|
||||
product(delete: true, where: { id: { eq: 1 } }) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
vars := map[string]json.RawMessage{
|
||||
"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
|
||||
}
|
||||
|
||||
compileGQLToPSQL(t, gql, vars, "user")
|
||||
}
|
||||
|
||||
// func blockedInsert(t *testing.T) {
|
||||
// gql := `mutation {
|
||||
// user(insert: $data) {
|
||||
// id
|
||||
// }
|
||||
// }`
|
||||
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '$1' :: json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
// vars := map[string]json.RawMessage{
|
||||
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
|
||||
// }
|
||||
|
||||
// resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// fmt.Println(string(resSQL))
|
||||
|
||||
// if string(resSQL) != sql {
|
||||
// t.Fatal(errNotExpected)
|
||||
// }
|
||||
// }
|
||||
|
||||
// func blockedUpdate(t *testing.T) {
|
||||
// gql := `mutation {
|
||||
// user(where: { id: { lt: 5 } }, update: $data) {
|
||||
// id
|
||||
// email
|
||||
// }
|
||||
// }`
|
||||
|
||||
// sql := `WITH "users" AS (WITH "input" AS (SELECT '$1' :: json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
|
||||
|
||||
// vars := map[string]json.RawMessage{
|
||||
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
|
||||
// }
|
||||
|
||||
// resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
|
||||
// if string(resSQL) != sql {
|
||||
// t.Fatal(errNotExpected)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestCompileMutate(t *testing.T) {
|
||||
t.Run("singleUpsert", singleUpsert)
|
||||
t.Run("singleUpsertWhere", singleUpsertWhere)
|
||||
t.Run("bulkUpsert", bulkUpsert)
|
||||
t.Run("delete", delete)
|
||||
// t.Run("blockedInsert", blockedInsert)
|
||||
// t.Run("blockedUpdate", blockedUpdate)
|
||||
}
|
@ -1,241 +0,0 @@
|
||||
package psql_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/psql"
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
)
|
||||
|
||||
const (
|
||||
errNotExpected = "Generated SQL did not match what was expected"
|
||||
headerMarker = "=== RUN"
|
||||
commentMarker = "---"
|
||||
)
|
||||
|
||||
var (
|
||||
qcompile *qcode.Compiler
|
||||
pcompile *psql.Compiler
|
||||
expected map[string][]string
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var err error
|
||||
|
||||
qcompile, err = qcode.NewCompiler(qcode.Config{
|
||||
Blocklist: []string{
|
||||
"secret",
|
||||
"password",
|
||||
"token",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("user", "product", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "name", "price", "users", "customers"},
|
||||
Filters: []string{
|
||||
"{ price: { gt: 0 } }",
|
||||
"{ price: { lt: 8 } }",
|
||||
},
|
||||
},
|
||||
Insert: qcode.InsertConfig{
|
||||
Presets: map[string]string{
|
||||
"user_id": "$user_id",
|
||||
"created_at": "now",
|
||||
"updated_at": "now",
|
||||
},
|
||||
},
|
||||
Update: qcode.UpdateConfig{
|
||||
Filters: []string{"{ user_id: { eq: $user_id } }"},
|
||||
Presets: map[string]string{"updated_at": "now"},
|
||||
},
|
||||
Delete: qcode.DeleteConfig{
|
||||
Filters: []string{
|
||||
"{ price: { gt: 0 } }",
|
||||
"{ price: { lt: 8 } }",
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("anon", "product", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "name"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("anon1", "product", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "name", "price"},
|
||||
DisableFunctions: true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("user", "users", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "full_name", "avatar", "email", "products"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Filters: []string{"false"},
|
||||
DisableFunctions: true,
|
||||
},
|
||||
Insert: qcode.InsertConfig{
|
||||
Filters: []string{"false"},
|
||||
},
|
||||
Update: qcode.UpdateConfig{
|
||||
Filters: []string{"false"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("user", "mes", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "full_name", "avatar"},
|
||||
Filters: []string{
|
||||
"{ id: { eq: $user_id } }",
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = qcompile.AddRole("user", "customers", qcode.TRConfig{
|
||||
Query: qcode.QueryConfig{
|
||||
Columns: []string{"id", "email", "full_name", "products"},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
schema, err := psql.GetTestSchema()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
vars := map[string]string{
|
||||
"admin_account_id": "5",
|
||||
}
|
||||
|
||||
pcompile = psql.NewCompiler(psql.Config{
|
||||
Schema: schema,
|
||||
Vars: vars,
|
||||
})
|
||||
|
||||
expected = make(map[string][]string)
|
||||
|
||||
b, err := ioutil.ReadFile("tests.sql")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
text := string(b)
|
||||
lines := strings.Split(text, "\n")
|
||||
|
||||
var h string
|
||||
|
||||
for _, v := range lines {
|
||||
switch {
|
||||
case strings.HasPrefix(v, headerMarker):
|
||||
h = strings.TrimSpace(v[len(headerMarker):])
|
||||
|
||||
case strings.HasPrefix(v, commentMarker):
|
||||
break
|
||||
|
||||
default:
|
||||
v := strings.TrimSpace(v)
|
||||
if len(v) != 0 {
|
||||
expected[h] = append(expected[h], v)
|
||||
}
|
||||
}
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
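// Layout of tests.sql assumed by the parsing loop above (a sketch):
//
//   === RUN   TestCompileInsert/simpleInsert
//   WITH "users" AS (...) SELECT ...
//
// Lines starting with "---" are skipped as comments; every other non-blank
// line is recorded as an accepted SQL variant for the preceding test name.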
|
||||
func compileGQLToPSQL(t *testing.T, gql string, vars psql.Variables, role string) {
|
||||
generateTestFile := false
|
||||
|
||||
if generateTestFile {
|
||||
var sqlStmts []string
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
qc, err := qcompile.Compile([]byte(gql), role)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, sqlB, err := pcompile.CompileEx(qc, vars)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sql := string(sqlB)
|
||||
|
||||
match := false
|
||||
for _, s := range sqlStmts {
|
||||
if sql == s {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !match {
|
||||
sqlStmts = append(sqlStmts, sql)
|
||||
fmt.Println(sql)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < 200; i++ {
|
||||
qc, err := qcompile.Compile([]byte(gql), role)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, sqlStmt, err := pcompile.CompileEx(qc, vars)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
failed := true
|
||||
|
||||
for _, sql := range expected[t.Name()] {
|
||||
if string(sqlStmt) == sql {
|
||||
failed = false
|
||||
}
|
||||
}
|
||||
|
||||
if failed {
|
||||
fmt.Println(string(sqlStmt))
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,1370 +0,0 @@
|
||||
//nolint:errcheck
|
||||
package psql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/dosco/super-graph/core/internal/qcode"
|
||||
"github.com/dosco/super-graph/core/internal/util"
|
||||
)
|
||||
|
||||
const (
|
||||
closeBlock = 500
|
||||
)
|
||||
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
IsArray bool
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
skipped uint32
|
||||
params []Param
|
||||
pindex map[string]int
|
||||
}
|
||||
|
||||
type compilerContext struct {
|
||||
md Metadata
|
||||
w io.Writer
|
||||
s []qcode.Select
|
||||
*Compiler
|
||||
}
|
||||
|
||||
type Variables map[string]json.RawMessage
|
||||
|
||||
type Config struct {
|
||||
Schema *DBSchema
|
||||
Vars map[string]string
|
||||
}
|
||||
|
||||
type Compiler struct {
|
||||
schema *DBSchema
|
||||
vars map[string]string
|
||||
}
|
||||
|
||||
func NewCompiler(conf Config) *Compiler {
|
||||
return &Compiler{
|
||||
schema: conf.Schema,
|
||||
vars: conf.Vars,
|
||||
}
|
||||
}
|
||||
|
||||
func (co *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
|
||||
return co.schema.SetRel(child, parent, rel)
|
||||
}
|
||||
|
||||
func (co *Compiler) IDColumn(table string) (*DBColumn, error) {
|
||||
ti, err := co.schema.GetTable(table)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ti.PrimaryCol == nil {
|
||||
return nil, fmt.Errorf("no primary key column found")
|
||||
}
|
||||
|
||||
return ti.PrimaryCol, nil
|
||||
}
|
||||
|
||||
func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (Metadata, []byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
metad, err := co.Compile(w, qc, vars)
|
||||
return metad, w.Bytes(), err
|
||||
}
|
||||
|
||||
func (co *Compiler) Compile(w io.Writer, qc *qcode.QCode, vars Variables) (Metadata, error) {
|
||||
return co.CompileWithMetadata(w, qc, vars, Metadata{})
|
||||
}
|
||||
|
||||
func (co *Compiler) CompileWithMetadata(w io.Writer, qc *qcode.QCode, vars Variables, md Metadata) (Metadata, error) {
|
||||
md.skipped = 0
|
||||
|
||||
if qc == nil {
|
||||
return md, fmt.Errorf("qcode is nil")
|
||||
}
|
||||
|
||||
switch qc.Type {
|
||||
case qcode.QTQuery:
|
||||
return co.compileQueryWithMetadata(w, qc, vars, md)
|
||||
|
||||
case qcode.QTInsert,
|
||||
qcode.QTUpdate,
|
||||
qcode.QTDelete,
|
||||
qcode.QTUpsert:
|
||||
return co.compileMutation(w, qc, vars)
|
||||
|
||||
default:
|
||||
return Metadata{}, fmt.Errorf("Unknown operation type %d", qc.Type)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (co *Compiler) compileQueryWithMetadata(
|
||||
w io.Writer, qc *qcode.QCode, vars Variables, md Metadata) (Metadata, error) {
|
||||
|
||||
if len(qc.Selects) == 0 {
|
||||
return md, errors.New("empty query")
|
||||
}
|
||||
|
||||
c := &compilerContext{md, w, qc.Selects, co}
|
||||
st := NewIntStack()
|
||||
i := 0
|
||||
|
||||
io.WriteString(c.w, `SELECT jsonb_build_object(`)
|
||||
for _, id := range qc.Roots {
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
|
||||
root := &qc.Selects[id]
|
||||
|
||||
if root.SkipRender || len(root.Cols) == 0 {
|
||||
squoted(c.w, root.FieldName)
|
||||
io.WriteString(c.w, `, `)
|
||||
io.WriteString(c.w, `NULL`)
|
||||
|
||||
} else {
|
||||
st.Push(root.ID + closeBlock)
|
||||
st.Push(root.ID)
|
||||
c.renderRootSelect(root)
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
if st.Len() != 0 {
|
||||
io.WriteString(c.w, `) as "__root" FROM `)
|
||||
} else {
|
||||
io.WriteString(c.w, `) as "__root"`)
|
||||
return c.md, nil
|
||||
}
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
id := st.Pop()
|
||||
|
||||
if id < closeBlock {
|
||||
sel := &c.s[id]
|
||||
|
||||
ti, err := c.schema.GetTable(sel.Name)
|
||||
if err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
if sel.Type != qcode.STUnion {
|
||||
if len(sel.Cols) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if sel.ParentID == -1 {
|
||||
io.WriteString(c.w, `(`)
|
||||
} else {
|
||||
c.renderLateralJoin(sel)
|
||||
}
|
||||
|
||||
if !ti.IsSingular {
|
||||
c.renderPluralSelect(sel, ti)
|
||||
}
|
||||
|
||||
if err := c.renderSelect(sel, ti, vars); err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, cid := range sel.Children {
|
||||
if hasBit(c.md.skipped, uint32(cid)) {
|
||||
continue
|
||||
}
|
||||
child := &c.s[cid]
|
||||
|
||||
if child.SkipRender {
|
||||
continue
|
||||
}
|
||||
st.Push(child.ID + closeBlock)
|
||||
st.Push(child.ID)
|
||||
}
|
||||
|
||||
} else {
|
||||
sel := &c.s[(id - closeBlock)]
|
||||
|
||||
if sel.Type != qcode.STUnion {
|
||||
ti, err := c.schema.GetTable(sel.Name)
|
||||
if err != nil {
|
||||
return c.md, err
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, "__sr", sel.ID)
|
||||
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, "__sj", sel.ID)
|
||||
|
||||
if !ti.IsSingular {
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, "__sj", sel.ID)
|
||||
}
|
||||
|
||||
if sel.ParentID == -1 {
|
||||
if st.Len() != 0 {
|
||||
io.WriteString(c.w, `, `)
|
||||
}
|
||||
} else {
|
||||
c.renderLateralJoinClose(sel)
|
||||
}
|
||||
}
|
||||
|
||||
if sel.Type != qcode.STMember {
|
||||
if len(sel.Args) != 0 {
|
||||
for _, v := range sel.Args {
|
||||
qcode.FreeNode(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return c.md, nil
|
||||
}
|
||||
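// The stack discipline above, in brief: each select ID is pushed twice, once
// as-is and once offset by closeBlock (500). Popping the plain ID opens that
// select's SQL and pushes its children; popping ID+closeBlock later emits
// the matching closing parens and aliases, yielding correctly nested
// subqueries without recursion.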
|
||||
func (c *compilerContext) renderPluralSelect(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
io.WriteString(c.w, `SELECT coalesce(jsonb_agg("__sj_`)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."json"), '[]') as "json"`)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
n := 0
|
||||
|
||||
// check if primary key already included in order by
|
||||
// query argument
|
||||
for _, ob := range sel.OrderBy {
|
||||
if ob.Col == ti.PrimaryCol.Key {
|
||||
n = 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if n == 1 {
|
||||
n = len(sel.OrderBy)
|
||||
} else {
|
||||
n = len(sel.OrderBy) + 1
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `, CONCAT_WS(','`)
|
||||
for i := 0; i < n; i++ {
|
||||
io.WriteString(c.w, `, max("__cur_`)
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `")`)
|
||||
}
|
||||
io.WriteString(c.w, `) as "cursor"`)
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` FROM (`)
|
||||
return nil
|
||||
}
|
||||
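// Sketch: with cursor paging over two ORDER BY columns the block above emits
//   , CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor"
// one component per ORDER BY column, matching the "__cur_N" values exposed
// by the inner select.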
|
||||
func (c *compilerContext) renderRootSelect(sel *qcode.Select) error {
|
||||
io.WriteString(c.w, `'`)
|
||||
io.WriteString(c.w, sel.FieldName)
|
||||
io.WriteString(c.w, `', `)
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."json"`)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
io.WriteString(c.w, `, '`)
|
||||
io.WriteString(c.w, sel.FieldName)
|
||||
io.WriteString(c.w, `_cursor', `)
|
||||
|
||||
io.WriteString(c.w, `"__sj_`)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `"."cursor"`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) initSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) ([]*qcode.Column, error) {
|
||||
cols := make([]*qcode.Column, 0, len(sel.Cols))
|
||||
colmap := make(map[string]struct{}, len(sel.Cols))
|
||||
|
||||
for i := range sel.Cols {
|
||||
colmap[sel.Cols[i].Name] = struct{}{}
|
||||
}
|
||||
|
||||
for i := range sel.OrderBy {
|
||||
colmap[sel.OrderBy[i].Col] = struct{}{}
|
||||
}
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
colmap[ti.PrimaryCol.Key] = struct{}{}
|
||||
addPrimaryKey := true
|
||||
|
||||
for _, ob := range sel.OrderBy {
|
||||
if ob.Col == ti.PrimaryCol.Key {
|
||||
addPrimaryKey = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if addPrimaryKey {
|
||||
ob := &qcode.OrderBy{Col: ti.PrimaryCol.Name, Order: qcode.OrderAsc}
|
||||
|
||||
if sel.Paging.Type == qcode.PtBackward {
|
||||
ob.Order = qcode.OrderDesc
|
||||
}
|
||||
sel.OrderBy = append(sel.OrderBy, ob)
|
||||
}
|
||||
}
|
||||
|
||||
if sel.Paging.Cursor {
|
||||
c.addSeekPredicate(sel)
|
||||
}
|
||||
|
||||
for _, id := range sel.Children {
|
||||
child := &c.s[id]
|
||||
|
||||
rel, err := c.schema.GetRel(child.Name, ti.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch rel.Type {
|
||||
case RelOneToOne, RelOneToMany:
|
||||
if _, ok := colmap[rel.Right.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Col, FieldName: rel.Right.Col})
|
||||
colmap[rel.Right.Col] = struct{}{}
|
||||
}
|
||||
|
||||
case RelOneToManyThrough:
|
||||
if _, ok := colmap[rel.Left.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
|
||||
colmap[rel.Left.Col] = struct{}{}
|
||||
}
|
||||
|
||||
case RelEmbedded:
|
||||
if _, ok := colmap[rel.Left.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
|
||||
colmap[rel.Left.Col] = struct{}{}
|
||||
}
|
||||
|
||||
case RelRemote:
|
||||
if _, ok := colmap[rel.Left.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
|
||||
colmap[rel.Left.Col] = struct{}{}
|
||||
c.md.skipped |= (1 << uint(id))
|
||||
}
|
||||
|
||||
case RelPolymorphic:
|
||||
if _, ok := colmap[rel.Left.Col]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
|
||||
colmap[rel.Left.Col] = struct{}{}
|
||||
}
|
||||
if _, ok := colmap[rel.Right.Table]; !ok {
|
||||
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Table, FieldName: rel.Right.Table})
|
||||
colmap[rel.Right.Table] = struct{}{}
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown relationship %s", rel)
|
||||
}
|
||||
}
|
||||
|
||||
return cols, nil
|
||||
}
|
||||
|
||||
// This
|
||||
// (A, B, C) >= (X, Y, Z)
|
||||
//
|
||||
// Becomes
|
||||
// (A > X)
|
||||
// OR ((A = X) AND (B > Y))
|
||||
// OR ((A = X) AND (B = Y) AND (C > Z))
|
||||
// OR ((A = X) AND (B = Y) AND (C = Z))
|
||||
|
||||
func (c *compilerContext) addSeekPredicate(sel *qcode.Select) error {
|
||||
var or, and *qcode.Exp
|
||||
|
||||
obLen := len(sel.OrderBy)
|
||||
|
||||
if obLen > 1 {
|
||||
or = qcode.NewFilter()
|
||||
or.Op = qcode.OpOr
|
||||
}
|
||||
|
||||
for i := 0; i < obLen; i++ {
|
||||
if i > 0 {
|
||||
and = qcode.NewFilter()
|
||||
and.Op = qcode.OpAnd
|
||||
}
|
||||
|
||||
for n, ob := range sel.OrderBy {
|
||||
f := qcode.NewFilter()
|
||||
f.Col = ob.Col
|
||||
f.Type = qcode.ValRef
|
||||
f.Table = "__cur"
|
||||
f.Val = ob.Col
|
||||
|
||||
if obLen == 1 {
|
||||
qcode.AddFilter(sel, f)
|
||||
return nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case i > 0 && n != i:
|
||||
f.Op = qcode.OpEquals
|
||||
case ob.Order == qcode.OrderDesc:
|
||||
f.Op = qcode.OpLesserThan
|
||||
default:
|
||||
f.Op = qcode.OpGreaterThan
|
||||
}
|
||||
|
||||
if and != nil {
|
||||
and.Children = append(and.Children, f)
|
||||
} else {
|
||||
or.Children = append(or.Children, f)
|
||||
}
|
||||
|
||||
if n == i {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if and != nil {
|
||||
or.Children = append(or.Children, and)
|
||||
}
|
||||
}
|
||||
|
||||
qcode.AddFilter(sel, or)
|
||||
return nil
|
||||
}
|
||||
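// Worked example (a sketch, in the schematic notation of the comment above):
// for ORDER BY price ASC, id ASC the filter built here expands to
//   (price > __cur.price) OR ((price = __cur.price) AND (id > __cur.id))
// where "__cur" is the cursor CTE holding the last row of the previous page.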
|
||||
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo, vars Variables) error {
|
||||
var rel *DBRel
|
||||
var err error
|
||||
|
||||
// Relationships must be between union parents and their parents
|
||||
if sel.ParentID != -1 {
|
||||
if sel.Type == qcode.STMember && sel.UParentID != -1 {
|
||||
cn := c.s[sel.ParentID].Name
|
||||
pn := c.s[sel.UParentID].Name
|
||||
rel, err = c.schema.GetRel(cn, pn)
|
||||
|
||||
} else {
|
||||
pn := c.s[sel.ParentID].Name
|
||||
rel, err = c.schema.GetRel(ti.Name, pn)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
childCols, err := c.initSelect(sel, ti, vars)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// SELECT
|
||||
// io.WriteString(c.w, `SELECT jsonb_build_object(`)
|
||||
// if err := c.renderColumns(sel, ti, skipped); err != nil {
|
||||
// return 0, err
|
||||
// }
|
||||
|
||||
io.WriteString(c.w, `SELECT to_jsonb("__sr_`)
|
||||
int32String(c.w, sel.ID)
|
||||
io.WriteString(c.w, `".*) `)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
for i := range sel.OrderBy {
|
||||
io.WriteString(c.w, `- '__cur_`)
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `' `)
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(c.w, `AS "json"`)
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
for i := range sel.OrderBy {
|
||||
io.WriteString(c.w, `, "__cur_`)
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `"`)
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` FROM (SELECT `)
|
||||
|
||||
if err := c.renderColumns(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sel.Paging.Type != qcode.PtOffset {
|
||||
for i, ob := range sel.OrderBy {
|
||||
io.WriteString(c.w, `, LAST_VALUE(`)
|
||||
colWithTableID(c.w, ti.Name, sel.ID, ob.Col)
|
||||
io.WriteString(c.w, `) OVER() AS "__cur_`)
|
||||
int32String(c.w, int32(i))
|
||||
io.WriteString(c.w, `"`)
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(c.w, ` FROM (`)
|
||||
|
||||
// FROM (SELECT .... )
|
||||
if err = c.renderBaseSelect(sel, ti, rel, childCols); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
|
||||
io.WriteString(c.w, `)`)
|
||||
aliasWithID(c.w, ti.Name, sel.ID)
|
||||
|
||||
// END-FROM
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderLateralJoin(sel *qcode.Select) error {
|
||||
io.WriteString(c.w, ` LEFT OUTER JOIN LATERAL (`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderLateralJoinClose(sel *qcode.Select) error {
|
||||
// io.WriteString(c.w, `) `)
|
||||
// aliasWithID(c.w, "__sj", sel.ID)
|
||||
io.WriteString(c.w, ` ON ('true')`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoin(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
parent := &c.s[sel.ParentID]
|
||||
return c.renderJoinByName(ti.Name, parent.Name, parent.ID)
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinByName(table, parent string, id int32) error {
|
||||
rel, _ := c.schema.GetRel(table, parent)
|
||||
|
||||
// This join is only required for one-to-many relations since
|
||||
// these make use of join tables that need to be pulled in.
	if rel == nil || rel.Type != RelOneToManyThrough {
		return nil
	}

	// pt, err := c.schema.GetTable(parent)
	// if err != nil {
	// 	return err
	// }

	//fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
	//rel.Through, rel.Through, rel.ColT, c.parent.Name, c.parent.ID, rel.Left.Col)
	io.WriteString(c.w, ` LEFT OUTER JOIN "`)
	io.WriteString(c.w, rel.Through.Table)
	io.WriteString(c.w, `" ON ((`)
	colWithTable(c.w, rel.Through.Table, rel.Through.ColL)
	io.WriteString(c.w, `) = (`)
	colWithTable(c.w, rel.Left.Table, rel.Left.Col)
	io.WriteString(c.w, `))`)

	return nil
}

func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) error {
	i := 0
	var cn string

	for _, col := range sel.Cols {
		if n := funcPrefixLen(c.schema.fm, col.Name); n != 0 {
			if !sel.Functions {
				continue
			}
			cn = col.Name[n:]
		} else {
			cn = col.Name

			if strings.HasSuffix(cn, "_cursor") {
				continue
			}
		}

		if len(sel.Allowed) != 0 {
			if _, ok := sel.Allowed[cn]; !ok {
				continue
			}
		}

		if i != 0 {
			io.WriteString(c.w, ", ")
		}

		colWithTableID(c.w, ti.Name, sel.ID, col.Name)
		alias(c.w, col.FieldName)

		i++
	}

	i += c.renderRemoteRelColumns(sel, ti, i)

	return c.renderJoinColumns(sel, ti, i)
}

func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) int {
	i := colsRendered

	for _, id := range sel.Children {
		child := &c.s[id]

		rel, err := c.schema.GetRel(child.Name, sel.Name)
		if err != nil || rel.Type != RelRemote {
			continue
		}
		if i != 0 || len(sel.Cols) != 0 {
			io.WriteString(c.w, ", ")
		}

		colWithTableID(c.w, ti.Name, sel.ID, rel.Left.Col)
		alias(c.w, rel.Right.Col)
		i++
	}

	return i
}

func (c *compilerContext) renderJoinColumns(sel *qcode.Select, ti *DBTableInfo, colsRendered int) error {
	// columns previously rendered
	i := colsRendered

	for _, id := range sel.Children {
		if hasBit(c.md.skipped, uint32(id)) {
			continue
		}
		childSel := &c.s[id]

		if i != 0 {
			io.WriteString(c.w, ", ")
		}

		if childSel.SkipRender {
			io.WriteString(c.w, `NULL`)
			alias(c.w, childSel.FieldName)
			continue
		}

		if childSel.Type == qcode.STUnion {
			rel, err := c.schema.GetRel(childSel.Name, ti.Name)
			if err != nil {
				return err
			}
			io.WriteString(c.w, `(CASE `)
			for _, uid := range childSel.Children {
				unionSel := &c.s[uid]

				io.WriteString(c.w, `WHEN `)
				colWithTableID(c.w, ti.Name, sel.ID, rel.Right.Table)
				io.WriteString(c.w, ` = `)
				squoted(c.w, unionSel.Name)
				io.WriteString(c.w, ` THEN `)
				io.WriteString(c.w, `"__sj_`)
				int32String(c.w, unionSel.ID)
				io.WriteString(c.w, `"."json"`)
			}
			io.WriteString(c.w, `END)`)
			alias(c.w, childSel.FieldName)

		} else {
			io.WriteString(c.w, `"__sj_`)
			int32String(c.w, childSel.ID)
			io.WriteString(c.w, `"."json"`)
			alias(c.w, childSel.FieldName)
		}

		if childSel.Paging.Type != qcode.PtOffset {
			io.WriteString(c.w, `, "__sj_`)
			int32String(c.w, childSel.ID)
			io.WriteString(c.w, `"."cursor" AS "`)
			io.WriteString(c.w, childSel.FieldName)
			io.WriteString(c.w, `_cursor"`)
		}

		i++
	}

	return nil
}
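
// Illustrative note (not in the original source): for a plain child select
// with ID 1 and field name "products", the else-branch above emits
//
//	"__sj_1"."json" AS "products"
//
// and, when cursor paging is enabled, an additional
// `, "__sj_1"."cursor" AS "products_cursor"` column.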

func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
	childCols []*qcode.Column) error {
	isRoot := (rel == nil)
	isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
	hasOrder := len(sel.OrderBy) != 0

	if sel.Paging.Cursor {
		c.renderCursorCTE(sel)
	}

	io.WriteString(c.w, `SELECT `)

	if len(sel.DistinctOn) != 0 {
		c.renderDistinctOn(sel, ti)
	}

	realColsRendered, isAgg, err := c.renderBaseColumns(sel, ti, childCols)
	if err != nil {
		return err
	}

	io.WriteString(c.w, ` FROM `)

	c.renderFrom(sel, ti, rel)

	if isRoot && isFil {
		io.WriteString(c.w, ` WHERE (`)
		if err := c.renderWhere(sel, ti); err != nil {
			return err
		}
		io.WriteString(c.w, `)`)
	}

	if !isRoot {
		if err := c.renderJoin(sel, ti); err != nil {
			return err
		}

		io.WriteString(c.w, ` WHERE (`)

		if err := c.renderRelationship(sel, rel); err != nil {
			return err
		}
		if isFil {
			io.WriteString(c.w, ` AND `)
			if err := c.renderWhere(sel, ti); err != nil {
				return err
			}
		}
		io.WriteString(c.w, `)`)
	}

	if isAgg && len(realColsRendered) != 0 {
		io.WriteString(c.w, ` GROUP BY `)

		for i, id := range realColsRendered {
			c.renderComma(i)
			//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, c.sel.Cols[id].Name)
			colWithTable(c.w, ti.Name, sel.Cols[id].Name)
		}
	}

	if hasOrder {
		if err := c.renderOrderBy(sel, ti); err != nil {
			return err
		}
	}

	switch {
	case ti.IsSingular:
		io.WriteString(c.w, ` LIMIT ('1') :: integer`)

	case sel.Paging.Limit != "":
		//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
		io.WriteString(c.w, ` LIMIT ('`)
		io.WriteString(c.w, sel.Paging.Limit)
		io.WriteString(c.w, `') :: integer`)

	case sel.Paging.NoLimit:
		break

	default:
		io.WriteString(c.w, ` LIMIT ('20') :: integer`)
	}

	if sel.Paging.Offset != "" {
		//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
		io.WriteString(c.w, ` OFFSET ('`)
		io.WriteString(c.w, sel.Paging.Offset)
		io.WriteString(c.w, `') :: integer`)
	}

	return nil
}

func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DBRel) error {
	if rel != nil && rel.Type == RelEmbedded {
		// jsonb_to_recordset('[{"a":1,"b":[1,2,3],"c":"bar"}, {"a":2,"b":[1,2,3],"c":"bar"}]') as x(a int, b text, c text);

		io.WriteString(c.w, `"`)
		io.WriteString(c.w, rel.Left.Table)
		io.WriteString(c.w, `", `)

		io.WriteString(c.w, ti.Type)
		io.WriteString(c.w, `_to_recordset(`)
		colWithTable(c.w, rel.Left.Table, rel.Right.Col)
		io.WriteString(c.w, `) AS `)

		io.WriteString(c.w, `"`)
		io.WriteString(c.w, ti.Name)
		io.WriteString(c.w, `"`)

		io.WriteString(c.w, `(`)
		for i, col := range ti.Columns {
			if i != 0 {
				io.WriteString(c.w, `, `)
			}
			io.WriteString(c.w, col.Name)
			io.WriteString(c.w, ` `)
			io.WriteString(c.w, col.Type)
		}
		io.WriteString(c.w, `)`)

	} else {
		//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
		io.WriteString(c.w, `"`)
		io.WriteString(c.w, ti.Name)
		io.WriteString(c.w, `"`)
	}

	if sel.Paging.Cursor {
		io.WriteString(c.w, `, "__cur"`)
	}

	return nil
}

func (c *compilerContext) renderCursorCTE(sel *qcode.Select) error {
	io.WriteString(c.w, `WITH "__cur" AS (SELECT `)
	for i, ob := range sel.OrderBy {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		io.WriteString(c.w, `a[`)
		int32String(c.w, int32(i+1))
		io.WriteString(c.w, `] as `)
		quoted(c.w, ob.Col)
	}
	io.WriteString(c.w, ` FROM string_to_array(`)
	c.md.renderValueExp(c.w, Param{Name: "cursor", Type: "json"})
	io.WriteString(c.w, `, ',') as a) `)
	return nil
}
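
// Illustrative sketch (not in the original source): with an order-by on
// (price, id) this CTE renders roughly
//
//	WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id"
//	FROM string_to_array(<cursor param>, ',') as a)
//
// so each order-by column can be compared against the decoded cursor value.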

func (c *compilerContext) renderRelationshipByName(table, parent string) error {
	rel, err := c.schema.GetRel(table, parent)
	if err != nil {
		return err
	}
	return c.renderRelationship(nil, rel)
}

func (c *compilerContext) renderRelationship(sel *qcode.Select, rel *DBRel) error {
	var pid int32

	switch {
	case sel == nil:
		pid = int32(-1)
	case sel.Type == qcode.STMember:
		pid = sel.UParentID
	default:
		pid = sel.ParentID
	}

	io.WriteString(c.w, `((`)

	switch rel.Type {
	case RelOneToOne, RelOneToMany:

		//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
		//c.sel.Name, rel.Left.Col, c.parent.Name, c.parent.ID, rel.Right.Col)

		switch {
		case !rel.Left.Array && rel.Right.Array:
			colWithTable(c.w, rel.Left.Table, rel.Left.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTableID(c.w, rel.Right.Table, pid, rel.Right.Col)

		case rel.Left.Array && !rel.Right.Array:
			colWithTableID(c.w, rel.Right.Table, pid, rel.Right.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, rel.Left.Table, rel.Left.Col)

		default:
			colWithTable(c.w, rel.Left.Table, rel.Left.Col)
			io.WriteString(c.w, `) = (`)
			colWithTableID(c.w, rel.Right.Table, pid, rel.Right.Col)
		}

	case RelOneToManyThrough:
		// This requires the through table to be joined onto this select
		//fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
		//c.sel.Name, rel.Left.Col, rel.Through, rel.Right.Col)

		switch {
		case !rel.Left.Array && rel.Right.Array:
			colWithTable(c.w, rel.Left.Table, rel.Left.Col)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, rel.Through.Table, rel.Through.ColR)

		case rel.Left.Array && !rel.Right.Array:
			colWithTable(c.w, rel.Through.Table, rel.Through.ColR)
			io.WriteString(c.w, `) = any (`)
			colWithTable(c.w, rel.Left.Table, rel.Left.Col)

		default:
			colWithTable(c.w, rel.Through.Table, rel.Through.ColR)
			io.WriteString(c.w, `) = (`)
			colWithTable(c.w, rel.Right.Table, rel.Right.Col)
		}

	case RelEmbedded:
		colWithTable(c.w, rel.Left.Table, rel.Left.Col)
		io.WriteString(c.w, `) = (`)
		colWithTableID(c.w, rel.Left.Table, pid, rel.Left.Col)

	case RelPolymorphic:
		colWithTable(c.w, sel.Name, rel.Right.Col)
		io.WriteString(c.w, `) = (`)
		colWithTableID(c.w, rel.Left.Table, pid, rel.Left.Col)
		io.WriteString(c.w, `) AND (`)
		colWithTableID(c.w, rel.Left.Table, pid, rel.Right.Table)
		io.WriteString(c.w, `) = (`)
		squoted(c.w, sel.Name)
	}

	io.WriteString(c.w, `))`)

	return nil
}
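
// Illustrative output (not in the original source): for a RelOneToMany
// relation products.user_id -> users.id with parent select ID 0, the
// default branch above produces a condition shaped like
//
//	((("products"."user_id") = ("users_0"."id")))
//
// matching the fmt.Fprintf sketches left in the comments.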

func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error {
	if sel.Where != nil {
		return c.renderExp(sel.Where, ti, false)
	}
	return nil
}

func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested bool) error {
	st := util.NewStack()
	st.Push(ex)

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()

		switch val := intf.(type) {
		case int32:
			switch val {
			case '(':
				io.WriteString(c.w, `(`)
			case ')':
				io.WriteString(c.w, `)`)
			}

		case qcode.ExpOp:
			switch val {
			case qcode.OpAnd:
				io.WriteString(c.w, ` AND `)
			case qcode.OpOr:
				io.WriteString(c.w, ` OR `)
			case qcode.OpNot:
				io.WriteString(c.w, `NOT `)
			case qcode.OpFalse:
				io.WriteString(c.w, `false`)
			default:
				return fmt.Errorf("11: unexpected value %v (%T)", intf, intf)
			}

		case *qcode.Exp:
			switch val.Op {
			case qcode.OpFalse:
				st.Push(val.Op)

			case qcode.OpAnd, qcode.OpOr:
				st.Push(')')
				for i := len(val.Children) - 1; i >= 0; i-- {
					st.Push(val.Children[i])
					if i > 0 {
						st.Push(val.Op)
					}
				}
				st.Push('(')

			case qcode.OpNot:
				st.Push(val.Children[0])
				st.Push(qcode.OpNot)

			default:
				if !skipNested && len(val.NestedCols) != 0 {
					io.WriteString(c.w, `EXISTS `)

					if err := c.renderNestedWhere(val, ti); err != nil {
						return err
					}

				} else if err := c.renderOp(val, ti); err != nil {
					return err
				}
			}
			//qcode.FreeExp(val)

		default:
			return fmt.Errorf("12: unexpected value %v (%T)", intf, intf)
		}
	}

	return nil
}
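
// Illustrative walk (not in the original source): an OpAnd expression with
// children A and B is pushed as '(' A OpAnd B ')' (popped LIFO), so the
// loop emits "(", renders A, " AND ", renders B, then ")" without recursion.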

func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, ti *DBTableInfo) error {
	for i := 0; i < len(ex.NestedCols)-1; i++ {
		cti, err := c.schema.GetTable(ex.NestedCols[i])
		if err != nil {
			return err
		}

		if i != 0 {
			io.WriteString(c.w, ` AND `)
		}

		io.WriteString(c.w, `(SELECT 1 FROM `)
		io.WriteString(c.w, cti.Name)

		if err := c.renderJoinByName(cti.Name, ti.Name, -1); err != nil {
			return err
		}

		io.WriteString(c.w, ` WHERE `)

		if err := c.renderRelationshipByName(cti.Name, ti.Name); err != nil {
			return err
		}

		io.WriteString(c.w, ` AND (`)

		if err := c.renderExp(ex, cti, true); err != nil {
			return err
		}

		io.WriteString(c.w, `)`)

	}

	for i := 0; i < len(ex.NestedCols)-1; i++ {
		io.WriteString(c.w, `)`)
	}

	return nil
}
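
// Illustrative shape (not in the original source): each intermediate table in
// ex.NestedCols opens a subquery
//
//	(SELECT 1 FROM <table> <join> WHERE <relationship> AND (<exp>)
//
// and the trailing loop then closes the opened subquery parentheses.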

func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
	var col *DBColumn
	var ok bool

	if ex.Op == qcode.OpNop {
		return nil
	}

	if ex.Col != "" {
		if col, ok = ti.ColMap[ex.Col]; !ok {
			return fmt.Errorf("no column '%s' found", ex.Col)
		}

		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ex.Col)
		io.WriteString(c.w, `) `)
	}

	switch ex.Op {
	case qcode.OpEquals:
		io.WriteString(c.w, `=`)
	case qcode.OpNotEquals:
		io.WriteString(c.w, `!=`)
	case qcode.OpNotDistinct:
		io.WriteString(c.w, `IS NOT DISTINCT FROM`)
	case qcode.OpDistinct:
		io.WriteString(c.w, `IS DISTINCT FROM`)
	case qcode.OpGreaterOrEquals:
		io.WriteString(c.w, `>=`)
	case qcode.OpLesserOrEquals:
		io.WriteString(c.w, `<=`)
	case qcode.OpGreaterThan:
		io.WriteString(c.w, `>`)
	case qcode.OpLesserThan:
		io.WriteString(c.w, `<`)
	case qcode.OpIn:
		io.WriteString(c.w, `= ANY`)
	case qcode.OpNotIn:
		io.WriteString(c.w, `!= ANY`)
	case qcode.OpLike:
		io.WriteString(c.w, `LIKE`)
	case qcode.OpNotLike:
		io.WriteString(c.w, `NOT LIKE`)
	case qcode.OpILike:
		io.WriteString(c.w, `ILIKE`)
	case qcode.OpNotILike:
		io.WriteString(c.w, `NOT ILIKE`)
	case qcode.OpSimilar:
		io.WriteString(c.w, `SIMILAR TO`)
	case qcode.OpNotSimilar:
		io.WriteString(c.w, `NOT SIMILAR TO`)
	case qcode.OpContains:
		io.WriteString(c.w, `@>`)
	case qcode.OpContainedIn:
		io.WriteString(c.w, `<@`)
	case qcode.OpHasKey:
		io.WriteString(c.w, `?`)
	case qcode.OpHasKeyAny:
		io.WriteString(c.w, `?|`)
	case qcode.OpHasKeyAll:
		io.WriteString(c.w, `?&`)
	case qcode.OpIsNull:
		if strings.EqualFold(ex.Val, "true") {
			io.WriteString(c.w, `IS NULL)`)
		} else {
			io.WriteString(c.w, `IS NOT NULL)`)
		}
		return nil

	case qcode.OpEqID:
		if ti.PrimaryCol == nil {
			return fmt.Errorf("no primary key column defined for %s", ti.Name)
		}
		col = ti.PrimaryCol
		//fmt.Fprintf(w, `(("%s") =`, c.ti.PrimaryCol)
		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
		//io.WriteString(c.w, ti.PrimaryCol)
		io.WriteString(c.w, `) =`)

	case qcode.OpTsQuery:
		// guard the column that is actually dereferenced below
		if ti.TSVCol == nil {
			return fmt.Errorf("no tsv column defined for %s", ti.Name)
		}
		//fmt.Fprintf(w, `(("%s") @@ websearch_to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
		io.WriteString(c.w, `((`)
		colWithTable(c.w, ti.Name, ti.TSVCol.Name)
		if c.schema.ver >= 110000 {
			io.WriteString(c.w, `) @@ websearch_to_tsquery(`)
		} else {
			io.WriteString(c.w, `) @@ to_tsquery(`)
		}
		c.md.renderValueExp(c.w, Param{Name: ex.Val, Type: "string"})
		io.WriteString(c.w, `))`)

		return nil

	default:
		return fmt.Errorf("[Where] unexpected op code %d", ex.Op)
	}

	switch {
	case ex.Type == qcode.ValList:
		c.renderList(ex)
	case col == nil:
		return errors.New("no column found for expression value")
	default:
		c.renderVal(ex, c.vars, col)
	}

	io.WriteString(c.w, `)`)
	return nil
}
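
// Illustrative output (not in the original source): an OpGreaterThan filter
// on a numeric "price" column renders via renderVal as roughly
//
//	(("products"."price") > '10' :: numeric(7,2))
//
// with the value cast to the column's Postgres type.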

func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) error {
	io.WriteString(c.w, ` ORDER BY `)
	for i := range sel.OrderBy {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		ob := sel.OrderBy[i]
		colWithTable(c.w, ti.Name, ob.Col)

		switch ob.Order {
		case qcode.OrderAsc:
			io.WriteString(c.w, ` ASC`)
		case qcode.OrderDesc:
			io.WriteString(c.w, ` DESC`)
		case qcode.OrderAscNullsFirst:
			io.WriteString(c.w, ` ASC NULLS FIRST`)
		case qcode.OrderDescNullsFirst:
			io.WriteString(c.w, ` DESC NULLS FIRST`)
		case qcode.OrderAscNullsLast:
			io.WriteString(c.w, ` ASC NULLS LAST`)
		case qcode.OrderDescNullsLast:
			io.WriteString(c.w, ` DESC NULLS LAST`)
		default:
			return fmt.Errorf("13: unexpected value %v", ob.Order)
		}
	}
	return nil
}

func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
	io.WriteString(c.w, `DISTINCT ON (`)
	for i := range sel.DistinctOn {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		colWithTable(c.w, ti.Name, sel.DistinctOn[i])
	}
	io.WriteString(c.w, `) `)
}

func (c *compilerContext) renderList(ex *qcode.Exp) {
	io.WriteString(c.w, ` (`)
	for i := range ex.ListVal {
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		switch ex.ListType {
		case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
			io.WriteString(c.w, ex.ListVal[i])
		case qcode.ValStr:
			io.WriteString(c.w, `'`)
			io.WriteString(c.w, ex.ListVal[i])
			io.WriteString(c.w, `'`)
		}
	}
	io.WriteString(c.w, `)`)
}

func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *DBColumn) {
	io.WriteString(c.w, ` `)

	switch ex.Type {
	case qcode.ValVar:
		val, ok := vars[ex.Val]
		switch {
		case ok && strings.HasPrefix(val, "sql:"):
			io.WriteString(c.w, `(`)
			c.md.RenderVar(c.w, val[4:])
			io.WriteString(c.w, `)`)

		case ok:
			squoted(c.w, val)

		case ex.Op == qcode.OpIn || ex.Op == qcode.OpNotIn:
			io.WriteString(c.w, `(ARRAY(SELECT json_array_elements_text(`)
			c.md.renderValueExp(c.w, Param{Name: ex.Val, Type: col.Type, IsArray: true})
			io.WriteString(c.w, `))`)

			io.WriteString(c.w, ` :: `)
			io.WriteString(c.w, col.Type)
			io.WriteString(c.w, `[])`)
			return

		default:
			c.md.renderValueExp(c.w, Param{Name: ex.Val, Type: col.Type, IsArray: false})
		}

	case qcode.ValRef:
		colWithTable(c.w, ex.Table, ex.Col)

	default:
		squoted(c.w, ex.Val)
	}

	io.WriteString(c.w, ` :: `)
	io.WriteString(c.w, col.Type)
}

func funcPrefixLen(fm map[string]*DBFunction, fn string) int {
	switch {
	case strings.HasPrefix(fn, "avg_"):
		return 4
	case strings.HasPrefix(fn, "count_"):
		return 6
	case strings.HasPrefix(fn, "max_"):
		return 4
	case strings.HasPrefix(fn, "min_"):
		return 4
	case strings.HasPrefix(fn, "sum_"):
		return 4
	// check the longer stddev_* prefixes before their "stddev_" prefix so
	// they are reachable
	case strings.HasPrefix(fn, "stddev_pop_"):
		return 11
	case strings.HasPrefix(fn, "stddev_samp_"):
		return 12
	case strings.HasPrefix(fn, "stddev_"):
		return 7
	case strings.HasPrefix(fn, "variance_"):
		return 9
	case strings.HasPrefix(fn, "var_pop_"):
		return 8
	case strings.HasPrefix(fn, "var_samp_"):
		return 9
	}
	fnLen := len(fn)

	for k := range fm {
		kLen := len(k)
		if kLen < fnLen && k[0] == fn[0] && strings.HasPrefix(fn, k) && fn[kLen] == '_' {
			return kLen + 1
		}
	}
	return 0
}
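
// Illustrative example (not in the original source): funcPrefixLen(fm,
// "max_price") returns 4, so renderColumns resolves the remaining "price"
// as the real column; a single-argument schema function "foo" in fm makes
// "foo_price" resolve the same way via the prefix scan above.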

func hasBit(n, pos uint32) bool {
	val := n & (1 << pos)
	return (val > 0)
}

func alias(w io.Writer, alias string) {
	io.WriteString(w, ` AS "`)
	io.WriteString(w, alias)
	io.WriteString(w, `"`)
}

func aliasWithID(w io.Writer, alias string, id int32) {
	io.WriteString(w, ` AS "`)
	io.WriteString(w, alias)
	io.WriteString(w, `_`)
	int32String(w, id)
	io.WriteString(w, `"`)
}

func colWithTable(w io.Writer, table, col string) {
	io.WriteString(w, `"`)
	io.WriteString(w, table)
	io.WriteString(w, `"."`)
	io.WriteString(w, col)
	io.WriteString(w, `"`)
}

func colWithTableID(w io.Writer, table string, id int32, col string) {
	io.WriteString(w, `"`)
	io.WriteString(w, table)
	if id >= 0 {
		io.WriteString(w, `_`)
		int32String(w, id)
	}
	io.WriteString(w, `"."`)
	io.WriteString(w, col)
	io.WriteString(w, `"`)
}

func quoted(w io.Writer, identifier string) {
	io.WriteString(w, `"`)
	io.WriteString(w, identifier)
	io.WriteString(w, `"`)
}

func squoted(w io.Writer, identifier string) {
	io.WriteString(w, `'`)
	io.WriteString(w, identifier)
	io.WriteString(w, `'`)
}

func int32String(w io.Writer, val int32) {
	io.WriteString(w, strconv.FormatInt(int64(val), 10))
}
@ -1,572 +0,0 @@
package psql_test

import (
	"bytes"
	"encoding/json"
	"testing"
)

func withComplexArgs(t *testing.T) {
	gql := `query {
		proDUcts(
			# returns only 30 items
			limit: 30,

			# starts from item 10, commented out for now
			# offset: 10,

			# orders the response items by highest price
			order_by: { price: desc },

			# no duplicate prices returned
			distinct: [ price ]

			# only items with an id >= 20 and < 28 are returned
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
			id
			NAME
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereIn(t *testing.T) {
	gql := `query {
		products(where: { id: { in: $list } }) {
			id
		}
	}`

	vars := map[string]json.RawMessage{
		"list": json.RawMessage(`[1,2,3]`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func withWhereAndList(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: [
					{ not: { id: { is_null: true } } },
					{ price: { gt: 10 } },
				] } ) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereIsNull(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: {
					not: { id: { is_null: true } },
					price: { gt: 10 }
				}}) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereMultiOr(t *testing.T) {
	gql := `query {
		products(
			where: {
				or: {
					not: { id: { is_null: true } },
					price: { gt: 10 },
					price: { lt: 20 }
				} }
		) {
			id
			name
			price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func fetchByID(t *testing.T) {
	gql := `query {
		product(id: $id) {
			id
			name
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func searchQuery(t *testing.T) {
	gql := `query {
		products(search: $query) {
			id
			name
			search_rank
			search_headline_description
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func oneToMany(t *testing.T) {
	gql := `query {
		users {
			email
			products {
				name
				price
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func oneToManyReverse(t *testing.T) {
	gql := `query {
		products {
			name
			price
			users {
				email
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func oneToManyArray(t *testing.T) {
	gql := `
	query {
		product {
			name
			price
			tags {
				id
				name
			}
		}
		tags {
			name
			product {
				name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func manyToMany(t *testing.T) {
	gql := `query {
		products {
			name
			customers {
				email
				full_name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func manyToManyReverse(t *testing.T) {
	gql := `query {
		customers {
			email
			full_name
			products {
				name
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func aggFunction(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func aggFunctionBlockedByCol(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func aggFunctionDisabled(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon1")
}

func aggFunctionWithFilter(t *testing.T) {
	gql := `query {
		products(where: { id: { gt: 10 } }) {
			id
			max_price
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func syntheticTables(t *testing.T) {
	gql := `query {
		me {
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func queryWithVariables(t *testing.T) {
	gql := `query {
		product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
			id
			name
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withWhereOnRelations(t *testing.T) {
	gql := `query {
		users(where: {
			not: {
				products: {
					price: { gt: 3 }
				}
			}
		}) {
			id
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func multiRoot(t *testing.T) {
	gql := `query {
		product {
			id
			name
			customer {
				email
			}
			customers {
				email
			}
		}
		user {
			id
			email
		}
		customer {
			id
		}
	}`

	compileGQLToPSQL(t, gql, nil, "user")
}

func withFragment1(t *testing.T) {
	gql := `
	fragment userFields1 on user {
		id
		email
	}

	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}

	fragment userFields2 on user {
		first_name
		last_name
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func withFragment2(t *testing.T) {
	gql := `
	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}

	fragment userFields1 on user {
		id
		email
	}

	fragment userFields2 on user {
		first_name
		last_name
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func withFragment3(t *testing.T) {
	gql := `
	fragment userFields1 on user {
		id
		email
	}

	fragment userFields2 on user {
		first_name
		last_name
	}

	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}
	`

	compileGQLToPSQL(t, gql, nil, "anon")
}

// func withInlineFragment(t *testing.T) {
// 	gql := `
// 	query {
// 		users {
// 			... on users {
// 				id
// 				email
// 			}
// 			created_at
// 			... on user {
// 				first_name
// 				last_name
// 			}
// 		}
// 	}
// 	`

// 	compileGQLToPSQL(t, gql, nil, "anon")
// }

func withCursor(t *testing.T) {
	gql := `query {
		Products(
			first: 20
			after: $cursor
			order_by: { price: desc }) {
			Name
		}
	}`

	vars := map[string]json.RawMessage{
		"cursor": json.RawMessage(`"0,1"`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func jsonColumnAsTable(t *testing.T) {
	gql := `query {
		products {
			id
			name
			tag_count {
				count
				tags {
					name
				}
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "admin")
}

func nullForAuthRequiredInAnon(t *testing.T) {
	gql := `query {
		products {
			id
			name
			user(where: { id: { eq: $user_id } }) {
				id
				email
			}
		}
	}`

	compileGQLToPSQL(t, gql, nil, "anon")
}

func blockedQuery(t *testing.T) {
	gql := `query {
		user(id: $id, where: { id: { gt: 3 } }) {
			id
			full_name
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "bad_dude")
}

func blockedFunctions(t *testing.T) {
	gql := `query {
		users {
			count_id
			email
		}
	}`

	compileGQLToPSQL(t, gql, nil, "bad_dude")
}

func TestCompileQuery(t *testing.T) {
	t.Run("withComplexArgs", withComplexArgs)
	t.Run("withWhereIn", withWhereIn)
	t.Run("withWhereAndList", withWhereAndList)
	t.Run("withWhereIsNull", withWhereIsNull)
	t.Run("withWhereMultiOr", withWhereMultiOr)
	t.Run("fetchByID", fetchByID)
	t.Run("searchQuery", searchQuery)
	t.Run("oneToMany", oneToMany)
	t.Run("oneToManyReverse", oneToManyReverse)
	t.Run("oneToManyArray", oneToManyArray)
	t.Run("manyToMany", manyToMany)
	t.Run("manyToManyReverse", manyToManyReverse)
	t.Run("aggFunction", aggFunction)
	t.Run("aggFunctionBlockedByCol", aggFunctionBlockedByCol)
	t.Run("aggFunctionDisabled", aggFunctionDisabled)
	t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
	t.Run("syntheticTables", syntheticTables)
	t.Run("queryWithVariables", queryWithVariables)
	t.Run("withWhereOnRelations", withWhereOnRelations)
	t.Run("multiRoot", multiRoot)
	t.Run("withFragment1", withFragment1)
	t.Run("withFragment2", withFragment2)
	t.Run("withFragment3", withFragment3)
	//t.Run("withInlineFragment", withInlineFragment)
	t.Run("jsonColumnAsTable", jsonColumnAsTable)
	t.Run("withCursor", withCursor)
	t.Run("nullForAuthRequiredInAnon", nullForAuthRequiredInAnon)
	t.Run("blockedQuery", blockedQuery)
	t.Run("blockedFunctions", blockedFunctions)
}

var benchGQL = []byte(`query {
	proDUcts(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		NAME
		price
		user {
			full_name
			picture : avatar
		}
	}
}`)

func BenchmarkCompile(b *testing.B) {
	w := &bytes.Buffer{}

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		w.Reset()

		qc, err := qcompile.Compile(benchGQL, "user")
		if err != nil {
			b.Fatal(err)
		}

		_, err = pcompile.Compile(w, qc, nil)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkCompileParallel(b *testing.B) {
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		w := &bytes.Buffer{}

		for pb.Next() {
			w.Reset()

			qc, err := qcompile.Compile(benchGQL, "user")
			if err != nil {
				b.Fatal(err)
			}

			_, err = pcompile.Compile(w, qc, nil)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
@ -1,520 +0,0 @@
package psql

import (
	"fmt"
	"strings"

	"github.com/gobuffalo/flect"
)

type DBSchema struct {
	ver int
	t   map[string]*DBTableInfo
	rm  map[string]map[string]*DBRel
	vt  map[string]*VirtualTable
	fm  map[string]*DBFunction
}

type DBTableInfo struct {
	Name       string
	Type       string
	IsSingular bool
	Columns    []DBColumn
	PrimaryCol *DBColumn
	TSVCol     *DBColumn
	ColMap     map[string]*DBColumn
	ColIDMap   map[int16]*DBColumn
	Singular   string
	Plural     string
}

type RelType int

const (
	RelOneToOne RelType = iota + 1
	RelOneToMany
	RelOneToManyThrough
	RelPolymorphic
	RelEmbedded
	RelRemote
)

type DBRel struct {
	Type    RelType
	Through struct {
		Table string
		ColL  string
		ColR  string
	}
	Left struct {
		col   *DBColumn
		Table string
		Col   string
		Array bool
	}
	Right struct {
		col   *DBColumn
		Table string
		Col   string
		Array bool
	}
}

func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
	schema := &DBSchema{
		ver: info.Version,
		t:   make(map[string]*DBTableInfo),
		rm:  make(map[string]map[string]*DBRel),
		vt:  make(map[string]*VirtualTable),
		fm:  make(map[string]*DBFunction, len(info.Functions)),
	}

	for i, t := range info.Tables {
		err := schema.addTable(t, info.Columns[i], aliases)
		if err != nil {
			return nil, err
		}
	}

	if err := schema.virtualRels(info.VTables); err != nil {
		return nil, err
	}

	for i, t := range info.Tables {
		err := schema.firstDegreeRels(t, info.Columns[i])
		if err != nil {
			return nil, err
		}
	}

	for i, t := range info.Tables {
		err := schema.secondDegreeRels(t, info.Columns[i])
		if err != nil {
			return nil, err
		}
	}

	for k, f := range info.Functions {
		if len(f.Params) == 1 {
			schema.fm[strings.ToLower(f.Name)] = &info.Functions[k]
		}
	}

	return schema, nil
}

func (s *DBSchema) addTable(
	t DBTable, cols []DBColumn, aliases map[string][]string) error {

	colmap := make(map[string]*DBColumn, len(cols))
	colidmap := make(map[int16]*DBColumn, len(cols))

	singular := flect.Singularize(t.Key)
	plural := flect.Pluralize(t.Key)

	ts := &DBTableInfo{
		Name:       t.Name,
		Type:       t.Type,
		IsSingular: true,
		Columns:    cols,
		ColMap:     colmap,
		ColIDMap:   colidmap,
		Singular:   singular,
		Plural:     plural,
	}
	s.t[singular] = ts

	tp := &DBTableInfo{
		Name:       t.Name,
		Type:       t.Type,
		IsSingular: false,
		Columns:    cols,
		ColMap:     colmap,
		ColIDMap:   colidmap,
		Singular:   singular,
		Plural:     plural,
	}
	s.t[plural] = tp

	if al, ok := aliases[t.Key]; ok {
		for i := range al {
			k1 := flect.Singularize(al[i])
			s.t[k1] = ts

			k2 := flect.Pluralize(al[i])
			s.t[k2] = tp
		}
	}

	for i := range cols {
		c := &cols[i]

		switch {
		case c.Type == "tsvector":
			s.t[singular].TSVCol = c
			s.t[plural].TSVCol = c

		case c.PrimaryKey:
			s.t[singular].PrimaryCol = c
			s.t[plural].PrimaryCol = c
		}

		colmap[c.Key] = c
		colidmap[c.ID] = c
	}

	return nil
}

func (s *DBSchema) virtualRels(vts []VirtualTable) error {
	for i := range vts {
		// copy the element before taking its address so each map entry
		// gets its own pointer rather than aliasing the loop variable
		vt := vts[i]
		s.vt[vt.Name] = &vt

		for _, t := range s.t {
			idCol, ok := t.ColMap[vt.IDColumn]
			if !ok {
				continue
			}
			if _, ok = t.ColMap[vt.TypeColumn]; !ok {
				continue
			}

			nt := DBTable{
				ID:   -1,
				Name: vt.Name,
				Key:  strings.ToLower(vt.Name),
				Type: "virtual",
			}

			if err := s.addTable(nt, nil, nil); err != nil {
				return err
			}

			rel := &DBRel{Type: RelPolymorphic}
			rel.Left.col = idCol
			rel.Left.Table = t.Name
			rel.Left.Col = idCol.Name

			rcol := DBColumn{
				Name: vt.FKeyColumn,
				Key:  strings.ToLower(vt.FKeyColumn),
				Type: idCol.Type,
			}

			rel.Right.col = &rcol
			rel.Right.Table = vt.TypeColumn
			rel.Right.Col = rcol.Name

			if err := s.SetRel(vt.Name, t.Name, rel); err != nil {
				return err
			}
		}
	}

	return nil
}

func (s *DBSchema) firstDegreeRels(t DBTable, cols []DBColumn) error {
	ct := t.Key
	cti, ok := s.t[ct]
	if !ok {
		return fmt.Errorf("invalid table '%s'", ct)
	}

	for i := range cols {
		c := cols[i]

		if c.FKeyTable == "" {
			continue
		}

		// Foreign key table name
		ft := strings.ToLower(c.FKeyTable)

		ti, ok := s.t[ft]
		if !ok {
			return fmt.Errorf("invalid foreign key table '%s'", ft)
		}

		// This is an embedded relationship like when a json/jsonb column
		// is exposed as a table
		if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
			rel := &DBRel{Type: RelEmbedded}
			rel.Left.col = cti.PrimaryCol
			rel.Left.Table = cti.Name
			rel.Left.Col = cti.PrimaryCol.Name

			rel.Right.col = &c
			rel.Right.Table = ti.Name
			rel.Right.Col = c.Name

			if err := s.SetRel(ft, ct, rel); err != nil {
				return err
			}
			continue
		}

		if len(c.FKeyColID) == 0 {
			continue
		}

		// Foreign key column id
		fcid := c.FKeyColID[0]

		fc, ok := ti.ColIDMap[fcid]
		if !ok {
			return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
				fcid, ti.Name)
		}

		var rel1, rel2 *DBRel

		// One-to-many relation between the current table and the
		// table in the foreign key
		if fc.UniqueKey {
			rel1 = &DBRel{Type: RelOneToOne}
		} else {
			rel1 = &DBRel{Type: RelOneToMany}
		}

		rel1.Left.col = &c
		rel1.Left.Table = t.Name
		rel1.Left.Col = c.Name
		rel1.Left.Array = c.Array

		rel1.Right.col = fc
		rel1.Right.Table = c.FKeyTable
		rel1.Right.Col = fc.Name
		rel1.Right.Array = fc.Array

		if err := s.SetRel(ct, ft, rel1); err != nil {
			return err
		}

		// One-to-many reverse relation between the foreign key table and
		// the current table
		if c.UniqueKey {
			rel2 = &DBRel{Type: RelOneToOne}
		} else {
			rel2 = &DBRel{Type: RelOneToMany}
		}

		rel2.Left.col = fc
		rel2.Left.Table = c.FKeyTable
		rel2.Left.Col = fc.Name
		rel2.Left.Array = fc.Array

		rel2.Right.col = &c
		rel2.Right.Table = t.Name
		rel2.Right.Col = c.Name
		rel2.Right.Array = c.Array

		if err := s.SetRel(ft, ct, rel2); err != nil {
			return err
		}
	}

	return nil
}
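
// Illustrative outcome (not in the original source, mirrors the test schema):
// products.user_id -> users.id registers rel1 as RelOneToOne under
// ("products", "users") because users.id is a unique key, and rel2 as
// RelOneToMany under ("users", "products") since user_id is not unique.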

func (s *DBSchema) secondDegreeRels(t DBTable, cols []DBColumn) error {
	jcols := make([]DBColumn, 0, len(cols))
	ct := t.Key
	cti, ok := s.t[ct]
	if !ok {
		return fmt.Errorf("invalid table '%s'", ct)
	}

	for i := range cols {
		c := cols[i]

		if c.FKeyTable == "" {
			continue
		}

		// Foreign key table name
		ft := strings.ToLower(c.FKeyTable)

		ti, ok := s.t[ft]
		if !ok {
			return fmt.Errorf("invalid foreign key table '%s'", ft)
		}

		// This is an embedded relationship like when a json/jsonb column
		// is exposed as a table
		if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
			continue
		}

		if len(c.FKeyColID) == 0 {
			continue
		}

		// Foreign key column id
		fcid := c.FKeyColID[0]

		if _, ok := ti.ColIDMap[fcid]; !ok {
			return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
				fcid, ti.Name)
		}

		jcols = append(jcols, c)
	}

	// If a table contains multiple foreign key columns it's a possible
	// join table for many-to-many relationships or multiple one-to-many
	// relations

	// Below, one-to-many relations use the current table as the
	// join table aka through table.
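	// Illustrative example (not in the original source, mirrors the test
	// schema): a "purchases" table with customer_id -> customers.id and
	// product_id -> products.id yields one-to-many-through relations in
	// both directions between customers and products, through purchases.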
	if len(jcols) > 1 {
		for i := range jcols {
			for n := range jcols {
				if n == i {
					continue
				}
				err := s.updateSchemaOTMT(cti, jcols[i], jcols[n])
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}

func (s *DBSchema) updateSchemaOTMT(
	ti *DBTableInfo, col1, col2 DBColumn) error {

	t1 := strings.ToLower(col1.FKeyTable)
	t2 := strings.ToLower(col2.FKeyTable)

	fc1, ok := s.t[t1].ColIDMap[col1.FKeyColID[0]]
	if !ok {
		return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
			col1.FKeyColID[0], ti.Name)
	}
	fc2, ok := s.t[t2].ColIDMap[col2.FKeyColID[0]]
	if !ok {
		return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
			col2.FKeyColID[0], ti.Name)
	}

	// One-to-many-through relation between the 1st foreign key table and
	// the 2nd foreign key table
	rel1 := &DBRel{Type: RelOneToManyThrough}
	rel1.Through.Table = ti.Name
	rel1.Through.ColL = col1.Name
	rel1.Through.ColR = col2.Name

	rel1.Left.col = fc1
	rel1.Left.Table = col1.FKeyTable
	rel1.Left.Col = fc1.Name

	rel1.Right.col = fc2
	rel1.Right.Table = t2
	rel1.Right.Col = fc2.Name

	if err := s.SetRel(t1, t2, rel1); err != nil {
		return err
	}

	// One-to-many-through relation between the 2nd foreign key table and
	// the 1st foreign key table
	rel2 := &DBRel{Type: RelOneToManyThrough}
	rel2.Through.Table = ti.Name
	rel2.Through.ColL = col2.Name
	rel2.Through.ColR = col1.Name

	rel2.Left.col = fc2
	rel2.Left.Table = col2.FKeyTable
	rel2.Left.Col = fc2.Name

	rel2.Right.col = fc1
	rel2.Right.Table = t1
	rel2.Right.Col = fc1.Name

	if err := s.SetRel(t2, t1, rel2); err != nil {
		return err
	}

	return nil
}

func (s *DBSchema) GetTableNames() []string {
	var names []string
	for name := range s.t {
		names = append(names, name)
	}
	return names
}

func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
	t, ok := s.t[table]
	if !ok {
		return nil, fmt.Errorf("unknown table '%s'", table)
	}
	return t, nil
}

func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
	sp := strings.ToLower(flect.Singularize(parent))
	pp := strings.ToLower(flect.Pluralize(parent))

	sc := strings.ToLower(flect.Singularize(child))
	pc := strings.ToLower(flect.Pluralize(child))

	if _, ok := s.rm[sc]; !ok {
		s.rm[sc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[pc]; !ok {
		s.rm[pc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[sc][sp]; !ok {
		s.rm[sc][sp] = rel
	}
	if _, ok := s.rm[sc][pp]; !ok {
		s.rm[sc][pp] = rel
	}
	if _, ok := s.rm[pc][sp]; !ok {
		s.rm[pc][sp] = rel
	}
	if _, ok := s.rm[pc][pp]; !ok {
		s.rm[pc][pp] = rel
	}

	return nil
}
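
// Illustrative example (not in the original source): SetRel("user", "product",
// rel) stores the same *DBRel under ("user","product"), ("user","products"),
// ("users","product") and ("users","products"), so GetRel succeeds for any
// singular/plural spelling of either side.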

func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
	rel, ok := s.rm[child][parent]
	if !ok {
		// No relationship found so this time fetch the table info
		// and try again in case child or parent was an alias
		ct, err := s.GetTable(child)
		if err != nil {
			return nil, err
		}
		pt, err := s.GetTable(parent)
		if err != nil {
			return nil, err
		}
		rel, ok = s.rm[ct.Name][pt.Name]
		if !ok {
			return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
				child, parent)
		}
	}
	return rel, nil
}

func (s *DBSchema) GetFunctions() []*DBFunction {
	var funcs []*DBFunction
	for _, f := range s.fm {
		funcs = append(funcs, f)
	}
	return funcs
}
@ -1,47 +0,0 @@
package psql

type IntStack struct {
	stA [20]int32
	st  []int32
	top int
}

// Create a new IntStack
func NewIntStack() *IntStack {
	s := &IntStack{top: -1}
	s.st = s.stA[:0]
	return s
}

// Return the number of items in the IntStack
func (s *IntStack) Len() int {
	return (s.top + 1)
}

// View the top item on the IntStack
func (s *IntStack) Peek() int32 {
	if s.top == -1 {
		return -1
	}
	return s.st[s.top]
}

// Pop the top item of the IntStack and return it
func (s *IntStack) Pop() int32 {
	if s.top == -1 {
		return -1
	}

	s.top--
	return s.st[(s.top + 1)]
}

// Push a value onto the top of the IntStack
func (s *IntStack) Push(value int32) {
	s.top++
	if len(s.st) <= s.top {
		s.st = append(s.st, value)
	} else {
		s.st[s.top] = value
	}
}
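
// Design note (not in the original source): the fixed stA backing array lets
// stacks of up to 20 values live on the initial allocation; Push only grows
// the slice beyond that capacity.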
@ -1,32 +0,0 @@
package psql

import "fmt"

func (rt RelType) String() string {
	switch rt {
	case RelOneToOne:
		return "one to one"
	case RelOneToMany:
		return "one to many"
	case RelOneToManyThrough:
		return "one to many through"
	case RelRemote:
		return "remote"
	case RelEmbedded:
		return "embedded"
	case RelPolymorphic:
		return "polymorphic"
	}
	return ""
}

func (re *DBRel) String() string {
	if re.Type == RelOneToManyThrough {
		return fmt.Sprintf("'%s.%s' --(%s.%s, %s.%s)--> '%s.%s'",
			re.Left.Table, re.Left.Col,
			re.Through.Table, re.Through.ColL, re.Through.Table, re.Through.ColR,
			re.Right.Table, re.Right.Col)
	}
	return fmt.Sprintf("'%s.%s' --(%s)--> '%s.%s'",
		re.Left.Table, re.Left.Col, re.Type, re.Right.Table, re.Right.Col)
}
@ -1,335 +0,0 @@
package psql

import (
	"database/sql"
	"fmt"
	"strconv"
	"strings"

	"github.com/jackc/pgtype"
)

type DBInfo struct {
	Version   int
	Tables    []DBTable
	Columns   [][]DBColumn
	Functions []DBFunction
	VTables   []VirtualTable
	colMap    map[string]map[string]*DBColumn
}

type VirtualTable struct {
	Name       string
	IDColumn   string
	TypeColumn string
	FKeyColumn string
}

func GetDBInfo(db *sql.DB, schema string) (*DBInfo, error) {
	di := &DBInfo{}
	var version string

	err := db.QueryRow(`SHOW server_version_num`).Scan(&version)
	if err != nil {
		return nil, fmt.Errorf("error fetching version: %w", err)
	}

	di.Version, err = strconv.Atoi(version)
	if err != nil {
		return nil, err
	}

	di.Tables, err = GetTables(db, schema)
	if err != nil {
		return nil, err
	}

	for _, t := range di.Tables {
		cols, err := GetColumns(db, schema, t.Name)
		if err != nil {
			return nil, err
		}

		di.Columns = append(di.Columns, cols)
	}

	di.colMap = newColMap(di.Tables, di.Columns)

	di.Functions, err = GetFunctions(db, schema)
	if err != nil {
		return nil, err
	}

	return di, nil
}
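
// Typical usage (illustrative, not in the original source; assumes a
// registered Postgres driver and a reachable database):
//
//	db, err := sql.Open("postgres", connString)
//	...
//	info, err := GetDBInfo(db, "public")
//	...
//	schema, err := NewDBSchema(info, nil)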

func newColMap(tables []DBTable, columns [][]DBColumn) map[string]map[string]*DBColumn {
	cm := make(map[string]map[string]*DBColumn, len(tables))

	for i, t := range tables {
		cols := columns[i]
		cm[t.Key] = make(map[string]*DBColumn, len(cols))

		for n, c := range cols {
			cm[t.Key][c.Key] = &columns[i][n]
		}
	}

	return cm
}

func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
	t.ID = di.Tables[len(di.Tables)-1].ID

	di.Tables = append(di.Tables, t)
	di.colMap[t.Key] = make(map[string]*DBColumn, len(cols))

	for i := range cols {
		cols[i].ID = int16(i)
		c := &cols[i]
		di.colMap[t.Key][c.Key] = c
	}
	di.Columns = append(di.Columns, cols)
}

func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
	v, ok := di.colMap[strings.ToLower(table)][strings.ToLower(column)]
	return v, ok
}

type DBTable struct {
	ID   int
	Name string
	Key  string
	Type string
}

func GetTables(db *sql.DB, schema string) ([]DBTable, error) {
	sqlStmt := `
SELECT
	c.relname as "name",
	CASE c.relkind WHEN 'r' THEN 'table'
		WHEN 'v' THEN 'view'
		WHEN 'm' THEN 'materialized view'
		WHEN 'f' THEN 'foreign table'
	END as "type"
FROM pg_catalog.pg_class c
	LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','m','f','')
	AND n.nspname = $1
	AND pg_catalog.pg_table_is_visible(c.oid);`

	var tables []DBTable

	rows, err := db.Query(sqlStmt, schema)
	if err != nil {
		return nil, fmt.Errorf("error fetching tables: %s", err)
	}
	defer rows.Close()

	for i := 0; rows.Next(); i++ {
		t := DBTable{ID: i}
		err = rows.Scan(&t.Name, &t.Type)
		if err != nil {
			return nil, err
		}
		t.Key = strings.ToLower(t.Name)
		if t.Key != "schema_migrations" && t.Key != "ar_internal_metadata" {
			tables = append(tables, t)
		}
	}

	return tables, nil
}

type DBColumn struct {
	ID         int16
	Name       string
	Key        string
	Type       string
	Array      bool
	NotNull    bool
	PrimaryKey bool
	UniqueKey  bool
	FKeyTable  string
	FKeyColID  []int16
	fKeyColID  pgtype.Int2Array
}

func GetColumns(db *sql.DB, schema, table string) ([]DBColumn, error) {
	sqlStmt := `
SELECT
	f.attnum AS id,
	f.attname AS name,
	f.attnotnull AS notnull,
	pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
	CASE
		WHEN f.attndims != 0 THEN true
		WHEN right(pg_catalog.format_type(f.atttypid,f.atttypmod), 2) = '[]' THEN true
		ELSE false
	END AS array,
	CASE
		WHEN p.contype = ('p'::char) THEN true
		ELSE false
	END AS primarykey,
	CASE
		WHEN p.contype = ('u'::char) THEN true
		ELSE false
	END AS uniquekey,
	CASE
		WHEN p.contype = ('f'::char) THEN g.relname
		ELSE ''::text
	END AS foreignkey,
	CASE
		WHEN p.contype = ('f'::char) THEN p.confkey::int2[]
		ELSE ARRAY[]::int2[]
	END AS foreignkey_fieldnum
FROM pg_attribute f
	JOIN pg_class c ON c.oid = f.attrelid
	LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
	LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
	LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind IN ('r', 'v', 'm', 'f')
	AND n.nspname = $1 -- Replace with Schema name
	AND c.relname = $2 -- Replace with table name
	AND f.attnum > 0
	AND f.attisdropped = false
ORDER BY id;`

	rows, err := db.Query(sqlStmt, schema, table)
	if err != nil {
		return nil, fmt.Errorf("error fetching columns: %s", err)
	}
	defer rows.Close()

	cmap := make(map[int16]DBColumn)

	for rows.Next() {
		c := DBColumn{}

		err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.Array, &c.PrimaryKey, &c.UniqueKey, &c.FKeyTable, &c.fKeyColID)
		if err != nil {
			return nil, err
		}

		if v, ok := cmap[c.ID]; ok {
			if c.PrimaryKey {
				v.PrimaryKey = true
				v.UniqueKey = true
			}
			if c.NotNull {
				v.NotNull = true
			}
			if c.UniqueKey {
				v.UniqueKey = true
			}
			if c.Array {
				v.Array = true
			}
			if len(c.FKeyTable) != 0 {
				v.FKeyTable = c.FKeyTable
			}
			if c.fKeyColID.Elements != nil {
				v.fKeyColID = c.fKeyColID
				err := v.fKeyColID.AssignTo(&v.FKeyColID)
				if err != nil {
					return nil, err
				}
			}
			cmap[c.ID] = v
		} else {
			err := c.fKeyColID.AssignTo(&c.FKeyColID)
			if err != nil {
				return nil, err
			}
			c.Key = strings.ToLower(c.Name)
			if c.PrimaryKey {
				c.UniqueKey = true
			}
			cmap[c.ID] = c
		}
	}

	cols := make([]DBColumn, 0, len(cmap))
	for i := range cmap {
		cols = append(cols, cmap[i])
	}

	return cols, nil
}
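
// Note (illustrative, not in the original source): a column appearing in
// several constraints (e.g. primary key plus foreign key) comes back as
// multiple rows from the query above; the cmap merge collapses them into a
// single DBColumn with all of the flags set.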

type DBFunction struct {
	Name   string
	Params []DBFuncParam
}

type DBFuncParam struct {
	ID   int
	Name sql.NullString
	Type string
}

func GetFunctions(db *sql.DB, schema string) ([]DBFunction, error) {
	sqlStmt := `
SELECT
	routines.routine_name,
	parameters.specific_name,
	parameters.data_type,
	parameters.parameter_name,
	parameters.ordinal_position
FROM
	information_schema.routines
RIGHT JOIN
	information_schema.parameters
	ON (routines.specific_name = parameters.specific_name and parameters.ordinal_position IS NOT NULL)
WHERE
	routines.specific_schema = $1
ORDER BY
	routines.routine_name, parameters.ordinal_position;`

	rows, err := db.Query(sqlStmt, schema)
	if err != nil {
		return nil, fmt.Errorf("error fetching functions: %s", err)
	}
	defer rows.Close()

	var funcs []DBFunction
	fm := make(map[string]int)

	parameterIndex := 1
	for rows.Next() {
		var fn, fid string
		fp := DBFuncParam{}

		err = rows.Scan(&fn, &fid, &fp.Type, &fp.Name, &fp.ID)
		if err != nil {
			return nil, err
		}

		if !fp.Name.Valid {
			// strconv.Itoa, not string(int): the latter yields the rune
			// with that code point rather than the decimal string
			fp.Name.String = strconv.Itoa(parameterIndex)
			fp.Name.Valid = true
		}

		if i, ok := fm[fid]; ok {
			funcs[i].Params = append(funcs[i].Params, fp)
		} else {
			funcs = append(funcs, DBFunction{Name: fn, Params: []DBFuncParam{fp}})
			fm[fid] = len(funcs) - 1
		}
		parameterIndex++
	}

	return funcs, nil
}

// func GetValType(type string) qcode.ValType {
// 	switch {
// 	case "bigint", "integer", "smallint", "numeric", "bigserial":
// 		return qcode.ValInt
// 	case "double precision", "real":
// 		return qcode.ValFloat
// 	case ""
// 	}
// }
@ -1,91 +0,0 @@
package psql

import (
	"strings"
)

func GetTestDBInfo() *DBInfo {
	tables := []DBTable{
		DBTable{Name: "customers", Type: "table"},
		DBTable{Name: "users", Type: "table"},
		DBTable{Name: "products", Type: "table"},
		DBTable{Name: "purchases", Type: "table"},
		DBTable{Name: "tags", Type: "table"},
		DBTable{Name: "tag_count", Type: "json"},
	}

	columns := [][]DBColumn{
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
			DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
			DBColumn{ID: 10, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
			DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
			DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
			DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
			DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
		[]DBColumn{
			DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
			DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
	}

	for i := range tables {
		tables[i].Key = strings.ToLower(tables[i].Name)
		for n := range columns[i] {
			columns[i][n].Key = strings.ToLower(columns[i][n].Name)
		}
	}

	return &DBInfo{
		Version:   110000,
		Tables:    tables,
		Columns:   columns,
		Functions: []DBFunction{},
		colMap:    newColMap(tables, columns),
	}
}

func GetTestSchema() (*DBSchema, error) {
	aliases := map[string][]string{
		"users": []string{"mes"},
	}

	return NewDBSchema(GetTestDBInfo(), aliases)
}
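
// A minimal sketch (not in the original) showing how these fixtures are
// consumed: GetTestDBInfo describes the tables, and GetTestSchema turns
// that into the *DBSchema the compiler tests run against. Assumes "fmt"
// is added to this file's imports.
func exampleFixtures() error {
	info := GetTestDBInfo()
	for i, t := range info.Tables {
		fmt.Printf("table %s has %d columns\n", t.Name, len(info.Columns[i]))
	}

	schema, err := GetTestSchema()
	if err != nil {
		return err
	}
	_ = schema // handed to the psql SQL compiler in the actual tests
	return nil
}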
@ -1,163 +0,0 @@
=== RUN TestCompileInsert
=== RUN TestCompileInsert/simpleInsert
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/singleInsert
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'user_id' AS bigint) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/bulkInsert
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/simpleInsertWithPresets
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, $2 :: bigint FROM "_sg_input" i RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertManyToMany
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone), "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products" RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone), "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers" RETURNING *) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToMany
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "users"."id" FROM "_sg_input" i, "users" RETURNING *) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOne
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "users"."id" FROM "_sg_input" i, "users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToManyWithConnect
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnect
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "_x_users"."id" FROM "_sg_input" i, "_x_users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user", "__sj_2"."json" AS "tags" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileInsert/nestedInsertOneToOneWithConnectArray
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone), "_x_users"."id" FROM "_sg_input" i, "_x_users" RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileInsert (0.03s)
--- PASS: TestCompileInsert/simpleInsert (0.00s)
--- PASS: TestCompileInsert/singleInsert (0.00s)
--- PASS: TestCompileInsert/bulkInsert (0.00s)
--- PASS: TestCompileInsert/simpleInsertWithPresets (0.00s)
--- PASS: TestCompileInsert/nestedInsertManyToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToMany (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOne (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToManyWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnect (0.00s)
--- PASS: TestCompileInsert/nestedInsertOneToOneWithConnectArray (0.00s)
=== RUN TestCompileMutate
=== RUN TestCompileMutate/singleUpsert
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/singleUpsertWhere
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > '3' :: numeric(7,2)) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/bulkUpsert
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileMutate/delete
WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = '1' :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileMutate (0.01s)
--- PASS: TestCompileMutate/singleUpsert (0.00s)
--- PASS: TestCompileMutate/singleUpsertWhere (0.00s)
--- PASS: TestCompileMutate/bulkUpsert (0.00s)
--- PASS: TestCompileMutate/delete (0.00s)
=== RUN TestCompileQuery
=== RUN TestCompileQuery/withComplexArgs
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT DISTINCT ON ("products"."price") "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."id") < '28' :: bigint) AND (("products"."id") >= '20' :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) ORDER BY "products"."price" DESC LIMIT ('30') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIn
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = ANY (ARRAY(SELECT json_array_elements_text($1)) :: bigint[])))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereAndList
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereIsNull
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE (((("products"."price") > '10' :: numeric(7,2)) AND NOT (("products"."id") IS NULL) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereMultiOr
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND ((("products"."price") < '20' :: numeric(7,2)) OR (("products"."price") > '10' :: numeric(7,2)) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/fetchByID
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") = $1 :: bigint))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/searchQuery
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery($1)) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery($1)) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery($1))) LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToMany
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."email" AS "email", "__sj_1"."json" AS "products" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyReverse
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "__sj_1"."json" AS "users" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."email" AS "email" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/oneToManyArray
SELECT jsonb_build_object('tags', "__sj_0"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "__sj_3"."json" AS "tags" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "tags_0"."name" AS "name", "__sj_1"."json" AS "product" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToMany
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "__sj_1"."json" AS "customers" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers"."id")) WHERE ((("purchases"."product_id") = ("products"."id"))) LIMIT ('20') :: integer) AS "customers_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/manyToManyReverse
SELECT jsonb_build_object('customers', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "__sj_1"."json" AS "products" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_1"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."name" AS "name" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products"."id")) WHERE ((("purchases"."customer_id") = ("customers"."id")) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('20') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunction
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionBlockedByCol
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionDisabled
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."name" AS "name" FROM (SELECT "products"."name" FROM "products" GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/aggFunctionWithFilter
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))) AND (("products"."id") > '10' :: bigint))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/syntheticTables
SELECT jsonb_build_object('me', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = $1 :: bigint)) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/queryWithVariables
SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") = $1 :: numeric(7,2)) AND (("products"."id") = $2 :: bigint) AND ((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2))))) LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/withWhereOnRelations
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > '3' :: numeric(7,2))))) LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/multiRoot
SELECT jsonb_build_object('customer', "__sj_0"."json", 'user', "__sj_1"."json", 'product', "__sj_2"."json") as "__root" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "__sj_3"."json" AS "customers", "__sj_4"."json" AS "customer" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > '0' :: numeric(7,2)) AND (("products"."price") < '8' :: numeric(7,2)))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_4".*) AS "json"FROM (SELECT "customers_4"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers"."id")) WHERE ((("purchases"."product_id") = ("products"."id"))) LIMIT ('1') :: integer) AS "customers_4") AS "__sr_4") AS "__sj_4" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_3"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_3".*) AS "json"FROM (SELECT "customers_3"."email" AS "email" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers"."id")) WHERE ((("purchases"."product_id") = ("products"."id"))) LIMIT ('20') :: integer) AS "customers_3") AS "__sr_3") AS "__sj_3") AS "__sj_3" ON ('true')) AS "__sr_2") AS "__sj_2", (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1", (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "customers_0"."id" AS "id" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/withFragment1
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."first_name" AS "first_name", "users_0"."last_name" AS "last_name", "users_0"."created_at" AS "created_at", "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT , "users"."created_at", "users"."id", "users"."email" FROM "users" GROUP BY "users"."created_at", "users"."id", "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withFragment2
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."first_name" AS "first_name", "users_0"."last_name" AS "last_name", "users_0"."created_at" AS "created_at", "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT , "users"."created_at", "users"."id", "users"."email" FROM "users" GROUP BY "users"."created_at", "users"."id", "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withFragment3
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."first_name" AS "first_name", "users_0"."last_name" AS "last_name", "users_0"."created_at" AS "created_at", "users_0"."id" AS "id", "users_0"."email" AS "email" FROM (SELECT , "users"."created_at", "users"."id", "users"."email" FROM "users" GROUP BY "users"."created_at", "users"."id", "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/jsonColumnAsTable
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "tag_count" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "tag_count_1"."count" AS "count", "__sj_2"."json" AS "tags" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(jsonb_agg("__sj_2"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "tags_2"."name" AS "name" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2") AS "__sr_2") AS "__sj_2") AS "__sj_2" ON ('true')) AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/withCursor
SELECT jsonb_build_object('products', "__sj_0"."json", 'products_cursor', "__sj_0"."cursor") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json", CONCAT_WS(',', max("__cur_0"), max("__cur_1")) as "cursor" FROM (SELECT to_jsonb("__sr_0".*) - '__cur_0' - '__cur_1' AS "json", "__cur_0", "__cur_1"FROM (SELECT "products_0"."name" AS "name", LAST_VALUE("products_0"."price") OVER() AS "__cur_0", LAST_VALUE("products_0"."id") OVER() AS "__cur_1" FROM (WITH "__cur" AS (SELECT a[1] as "price", a[2] as "id" FROM string_to_array($1, ',') as a) SELECT "products"."name", "products"."id", "products"."price" FROM "products", "__cur" WHERE (((("products"."price") < "__cur"."price" :: numeric(7,2)) OR ((("products"."price") = "__cur"."price" :: numeric(7,2)) AND (("products"."id") > "__cur"."id" :: bigint)))) ORDER BY "products"."price" DESC, "products"."id" ASC LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/nullForAuthRequiredInAnon
SELECT jsonb_build_object('products', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", NULL AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedQuery
SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileQuery/blockedFunctions
SELECT jsonb_build_object('users', "__sj_0"."json") as "__root" FROM (SELECT coalesce(jsonb_agg("__sj_0"."json"), '[]') as "json" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."email" AS "email" FROM (SELECT , "users"."email" FROM "users" WHERE (false) GROUP BY "users"."email" LIMIT ('20') :: integer) AS "users_0") AS "__sr_0") AS "__sj_0") AS "__sj_0"
--- PASS: TestCompileQuery (0.03s)
--- PASS: TestCompileQuery/withComplexArgs (0.00s)
--- PASS: TestCompileQuery/withWhereIn (0.00s)
--- PASS: TestCompileQuery/withWhereAndList (0.00s)
--- PASS: TestCompileQuery/withWhereIsNull (0.00s)
--- PASS: TestCompileQuery/withWhereMultiOr (0.00s)
--- PASS: TestCompileQuery/fetchByID (0.00s)
--- PASS: TestCompileQuery/searchQuery (0.00s)
--- PASS: TestCompileQuery/oneToMany (0.00s)
--- PASS: TestCompileQuery/oneToManyReverse (0.00s)
--- PASS: TestCompileQuery/oneToManyArray (0.00s)
--- PASS: TestCompileQuery/manyToMany (0.00s)
--- PASS: TestCompileQuery/manyToManyReverse (0.00s)
--- PASS: TestCompileQuery/aggFunction (0.00s)
--- PASS: TestCompileQuery/aggFunctionBlockedByCol (0.00s)
--- PASS: TestCompileQuery/aggFunctionDisabled (0.00s)
--- PASS: TestCompileQuery/aggFunctionWithFilter (0.00s)
--- PASS: TestCompileQuery/syntheticTables (0.00s)
--- PASS: TestCompileQuery/queryWithVariables (0.00s)
--- PASS: TestCompileQuery/withWhereOnRelations (0.00s)
--- PASS: TestCompileQuery/multiRoot (0.00s)
--- PASS: TestCompileQuery/withFragment1 (0.00s)
--- PASS: TestCompileQuery/withFragment2 (0.00s)
--- PASS: TestCompileQuery/withFragment3 (0.00s)
--- PASS: TestCompileQuery/jsonColumnAsTable (0.00s)
--- PASS: TestCompileQuery/withCursor (0.00s)
--- PASS: TestCompileQuery/nullForAuthRequiredInAnon (0.00s)
--- PASS: TestCompileQuery/blockedQuery (0.00s)
--- PASS: TestCompileQuery/blockedFunctions (0.00s)
=== RUN TestCompileUpdate
=== RUN TestCompileUpdate/singleUpdate
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'description' AS text) FROM "_sg_input" i) WHERE ((("products"."id") = '1' :: bigint) AND (("products"."id") = $2 :: bigint)) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/simpleUpdateWithPresets
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), 'now' :: timestamp without time zone FROM "_sg_input" i) WHERE (("products"."user_id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateManyToMany
WITH "_sg_input" AS (SELECT $1 :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("purchases"."id") = $2 :: bigint) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT CAST( i.j ->>'sale_type' AS character varying), CAST( i.j ->>'quantity' AS integer), CAST( i.j ->>'due_date' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("purchases"."id") = $2 :: bigint) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)) FROM "_sg_input" i) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT jsonb_build_object('purchase', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "__sj_1"."json" AS "product", "__sj_2"."json" AS "customer" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_2".*) AS "json"FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2") AS "__sr_2") AS "__sj_2" ON ('true') LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToMany
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("users"."id") = '8' :: bigint) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOne
WITH "_sg_input" AS (SELECT $1 :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT CAST( i.j ->>'email' AS character varying) FROM "_sg_input" i) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToManyWithConnect
WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT CAST( i.j ->>'full_name' AS character varying), CAST( i.j ->>'email' AS character varying), CAST( i.j ->>'created_at' AS timestamp without time zone), CAST( i.j ->>'updated_at' AS timestamp without time zone) FROM "_sg_input" i) WHERE (("users"."id") = $2 :: bigint) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT jsonb_build_object('user', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "__sj_1"."json" AS "product" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithConnect
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "__sj_1"."json" AS "user" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT to_jsonb("__sr_1".*) AS "json"FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1") AS "__sr_1") AS "__sj_1" ON ('true')) AS "__sr_0") AS "__sj_0"
=== RUN TestCompileUpdate/nestedUpdateOneToOneWithDisconnect
WITH "_sg_input" AS (SELECT $1 :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT CAST( i.j ->>'name' AS character varying), CAST( i.j ->>'price' AS numeric(7,2)), "_x_users"."id" FROM "_sg_input" i, "_x_users") WHERE (("products"."id") = $2 :: bigint) RETURNING "products".*) SELECT jsonb_build_object('product', "__sj_0"."json") as "__root" FROM (SELECT to_jsonb("__sr_0".*) AS "json"FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0") AS "__sr_0") AS "__sj_0"
--- PASS: TestCompileUpdate (0.02s)
--- PASS: TestCompileUpdate/singleUpdate (0.00s)
--- PASS: TestCompileUpdate/simpleUpdateWithPresets (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateManyToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToMany (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOne (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToManyWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithConnect (0.00s)
--- PASS: TestCompileUpdate/nestedUpdateOneToOneWithDisconnect (0.00s)
PASS
ok github.com/dosco/super-graph/core/internal/psql 0.323s
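The statements above are the golden SQL that the psql compiler tests check against. For context, a minimal Go sketch (not part of the repository) of how such a compiled statement would be executed: $1 carries the JSON mutation payload and $2 the target row id. The db handle, compiledSQL string, and payload are illustrative assumptions, not the project's actual execution path.

package example

import "database/sql"

// runCompiledUpdate executes one of the compiled statements shown above.
// The statement selects a single jsonb column aliased "__root", so a
// single Scan target suffices. All names here are hypothetical.
func runCompiledUpdate(db *sql.DB, compiledSQL string, id int64) (string, error) {
	payload := []byte(`{"name": "Apple", "price": 1.25}`)

	var rootJSON string
	err := db.QueryRow(compiledSQL, payload, id).Scan(&rootJSON)
	return rootJSON, err
}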
@ -1,223 +0,0 @@
//nolint:errcheck
package psql

import (
	"errors"
	"fmt"
	"io"

	"github.com/dosco/super-graph/core/internal/qcode"
	"github.com/dosco/super-graph/core/internal/util"
)

func (c *compilerContext) renderUpdate(
	w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {

	update, ok := vars[qc.ActionVar]
	if !ok {
		return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
	}
	if len(update) == 0 {
		return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
	}

	io.WriteString(c.w, `WITH "_sg_input" AS (SELECT `)
	c.md.renderValueExp(c.w, Param{Name: qc.ActionVar, Type: "json"})
	// io.WriteString(c.w, qc.ActionVar)
	io.WriteString(c.w, ` :: json AS j)`)

	st := util.NewStack()
	st.Push(kvitem{_type: itemUpdate, key: ti.Name, val: update, ti: ti})

	for {
		if st.Len() == 0 {
			break
		}
		if update[0] == '[' && st.Len() > 1 {
			return 0, errors.New("Nested bulk update not supported")
		}
		intf := st.Pop()

		switch item := intf.(type) {
		case kvitem:
			if err := c.handleKVItem(st, item); err != nil {
				return 0, err
			}

		case renitem:
			var err error

			// if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
			// 	io.WriteString(c.w, ` WHERE false`)
			// }

			switch item._type {
			case itemUpdate:
				err = c.renderUpdateStmt(w, qc, item)
			case itemConnect:
				err = c.renderConnectStmt(qc, w, item)
			case itemDisconnect:
				err = c.renderDisconnectStmt(qc, w, item)
			case itemUnion:
				err = c.renderUnionStmt(w, item)
			}

			if err != nil {
				return 0, err
			}
		}
	}
	io.WriteString(c.w, ` `)

	return 0, nil
}

func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item renitem) error {
	ti := item.ti
	jt := item.data
	sk := nestedUpdateRelColumnsMap(item.kvitem)

	io.WriteString(c.w, `, `)
	renderCteName(c.w, item.kvitem)
	io.WriteString(c.w, ` AS (`)

	io.WriteString(w, `UPDATE `)
	quoted(w, ti.Name)
	io.WriteString(w, ` SET (`)
	c.renderInsertUpdateColumns(qc, jt, ti, sk, false)
	renderNestedUpdateRelColumns(w, item.kvitem, false)

	io.WriteString(w, `) = (SELECT `)
	c.renderInsertUpdateColumns(qc, jt, ti, sk, true)
	renderNestedUpdateRelColumns(w, item.kvitem, true)

	io.WriteString(w, ` FROM "_sg_input" i`)
	renderNestedUpdateRelTables(w, item.kvitem)
	io.WriteString(w, `) `)

	if item.id != 0 {
		// Render SQL to set id values if the child-to-parent
		// relationship is one-to-one
		rel := item.relCP

		io.WriteString(w, `FROM `)
		quoted(w, rel.Right.Table)

		io.WriteString(w, ` WHERE ((`)
		colWithTable(w, rel.Left.Table, rel.Left.Col)
		io.WriteString(w, `) = (`)
		colWithTable(w, rel.Right.Table, rel.Right.Col)
		io.WriteString(w, `)`)

		if item.relPC.Type == RelOneToMany {
			if conn, ok := item.data["where"]; ok {
				io.WriteString(w, ` AND `)
				renderWhereFromJSON(w, item.kvitem, "where", conn)
			} else if conn, ok := item.data["_where"]; ok {
				io.WriteString(w, ` AND `)
				renderWhereFromJSON(w, item.kvitem, "_where", conn)
			}
		}
		io.WriteString(w, `)`)

	} else if qc.Selects[0].Where != nil {
		io.WriteString(w, `WHERE `)
		if err := c.renderWhere(&qc.Selects[0], ti); err != nil {
			return err
		}
	}

	io.WriteString(w, ` RETURNING `)
	quoted(w, ti.Name)
	io.WriteString(w, `.*)`)

	return nil
}

func nestedUpdateRelColumnsMap(item kvitem) map[string]struct{} {
	sk := make(map[string]struct{}, len(item.items))

	for _, v := range item.items {
		if v._ctype > 0 && v.relCP.Type == RelOneToMany {
			sk[v.relCP.Right.Col] = struct{}{}
		}
	}

	return sk
}

func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
	// Render child foreign key columns if the child-to-parent
	// relationship is one-to-many
	for _, v := range item.items {
		if v._ctype > 0 && v.relCP.Type == RelOneToMany {
			if values {
				// if v.relCP.Right.Array {
				// 	io.WriteString(w, `array_diff(`)
				// 	colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
				// 	io.WriteString(w, `, `)
				// }

				if v._ctype > 0 {
					io.WriteString(w, `"_x_`)
					io.WriteString(w, v.relCP.Left.Table)
					io.WriteString(w, `".`)
					quoted(w, v.relCP.Left.Col)
				} else {
					colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
				}

				// if v.relCP.Right.Array {
				// 	io.WriteString(w, `)`)
				// }
			} else {
				quoted(w, v.relCP.Right.Col)
			}
		}
	}

	return nil
}

func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
	// Render tables needed to set values if the child-to-parent
	// relationship is one-to-many
	for _, v := range item.items {
		if v._ctype > 0 && v.relCP.Type == RelOneToMany {
			io.WriteString(w, `, "_x_`)
			io.WriteString(w, v.relCP.Left.Table)
			io.WriteString(w, `"`)
		}
	}

	return nil
}

func (c *compilerContext) renderDelete(
	w io.Writer, qc *qcode.QCode, vars Variables, ti *DBTableInfo) (uint32, error) {

	root := &qc.Selects[0]

	io.WriteString(c.w, `WITH `)
	quoted(c.w, ti.Name)

	io.WriteString(c.w, ` AS (DELETE FROM `)
	quoted(c.w, ti.Name)
	io.WriteString(c.w, ` WHERE `)

	if root.Where == nil {
		return 0, errors.New("'where' clause missing in delete mutation")
	}

	if err := c.renderWhere(root, ti); err != nil {
		return 0, err
	}

	io.WriteString(w, ` RETURNING `)
	quoted(w, ti.Name)
	io.WriteString(w, `.*) `)
	return 0, nil
}
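renderUpdate above streams SQL fragments straight to an io.Writer rather than concatenating strings. A minimal self-contained sketch of that style, reproducing just the `_sg_input` CTE prefix; the function names here are illustrative, not from the package:

package sketch

import (
	"bytes"
	"io"
)

// renderInputCTE mimics how renderUpdate emits SQL: many small
// WriteString calls against a shared writer.
func renderInputCTE(w io.Writer, placeholder string) {
	io.WriteString(w, `WITH "_sg_input" AS (SELECT `)
	io.WriteString(w, placeholder) // the real compiler renders a numbered $n param here
	io.WriteString(w, ` :: json AS j)`)
}

func renderExample() string {
	var buf bytes.Buffer
	renderInputCTE(&buf, "$1")
	return buf.String() // WITH "_sg_input" AS (SELECT $1 :: json AS j)
}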
@ -1,258 +0,0 @@
package psql_test

import (
	"encoding/json"
	"testing"
)

func singleUpdate(t *testing.T) {
	gql := `mutation {
		product(id: $id, update: $update, where: { id: { eq: 1 } }) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
	}

	compileGQLToPSQL(t, gql, vars, "anon")
}

func simpleUpdateWithPresets(t *testing.T) {
	gql := `mutation {
		product(update: $data) {
			id
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{"name": "Apple", "price": 1.25}`),
	}

	compileGQLToPSQL(t, gql, vars, "user")
}

func nestedUpdateManyToMany(t *testing.T) {
	gql := `mutation {
		purchase(update: $data, id: $id) {
			sale_type
			quantity
			due_date
			customer {
				id
				full_name
				email
			}
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(` {
			"sale_type": "bought",
			"quantity": 5,
			"due_date": "now",
			"customer": {
				"email": "thedude@rug.com",
				"full_name": "The Dude"
			},
			"product": {
				"name": "Apple",
				"price": 1.25
			}
		}
		`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToMany(t *testing.T) {
	gql := `mutation {
		user(update: $data, where: { id: { eq: 8 } }) {
			id
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"where": {
					"id": 2
				},
				"name": "Apple",
				"price": 1.25,
				"created_at": "now",
				"updated_at": "now"
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOne(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $id) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"created_at": "now",
			"updated_at": "now",
			"user": {
				"email": "thedude@rug.com"
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToManyWithConnect(t *testing.T) {
	gql := `mutation {
		user(update: $data, id: $id) {
			id
			full_name
			email
			product {
				id
				name
				price
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"email": "thedude@rug.com",
			"full_name": "The Dude",
			"created_at": "now",
			"updated_at": "now",
			"product": {
				"connect": { "id": 7 },
				"disconnect": { "id": 8 }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOneWithConnect(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $product_id) {
			id
			name
			user {
				id
				full_name
				email
			}
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"user": {
				"connect": { "id": 5, "email": "test@test.com" }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $id) {
			id
			name
			user_id
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{
			"name": "Apple",
			"price": 1.25,
			"user": {
				"disconnect": { "id": 5 }
			}
		}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}

// func nestedUpdateOneToOneWithDisconnectArray(t *testing.T) {
// 	gql := `mutation {
// 		product(update: $data, id: 2) {
// 			id
// 			name
// 			user_id
// 		}
// 	}`

// 	sql := `WITH "_sg_input" AS (SELECT $1 :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`

// 	vars := map[string]json.RawMessage{
// 		"data": json.RawMessage(`{
// 			"name": "Apple",
// 			"price": 1.25,
// 			"user": {
// 				"disconnect": { "id": 5 }
// 			}
// 		}`),
// 	}

// 	resSQL, err := compileGQLToPSQL(gql, vars, "admin")
// 	if err != nil {
// 		t.Fatal(err)
// 	}

// 	if string(resSQL) != sql {
// 		t.Fatal(errNotExpected)
// 	}
// }

func TestCompileUpdate(t *testing.T) {
	t.Run("singleUpdate", singleUpdate)
	t.Run("simpleUpdateWithPresets", simpleUpdateWithPresets)
	t.Run("nestedUpdateManyToMany", nestedUpdateManyToMany)
	t.Run("nestedUpdateOneToMany", nestedUpdateOneToMany)
	t.Run("nestedUpdateOneToOne", nestedUpdateOneToOne)
	t.Run("nestedUpdateOneToManyWithConnect", nestedUpdateOneToManyWithConnect)
	t.Run("nestedUpdateOneToOneWithConnect", nestedUpdateOneToOneWithConnect)
	t.Run("nestedUpdateOneToOneWithDisconnect", nestedUpdateOneToOneWithDisconnect)
	//t.Run("nestedUpdateOneToOneWithDisconnectArray", nestedUpdateOneToOneWithDisconnectArray)
}
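A sketch of adding another case in the same style; compileGQLToPSQL appears to compile the query and compare the result against recorded golden SQL like the statements in the test log above. The mutation and variables here are illustrative only:

// nestedUpdateSketch is a hypothetical case, not part of the original suite.
func nestedUpdateSketch(t *testing.T) {
	gql := `mutation {
		product(update: $data, id: $id) {
			id
			name
		}
	}`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{"name": "Pear", "price": 2.50}`),
	}

	compileGQLToPSQL(t, gql, vars, "admin")
}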
@ -1,11 +0,0 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/core/internal/qcode
BenchmarkQCompile-16          120888    9236 ns/op    3755 B/op    28 allocs/op
BenchmarkQCompileP-16         502248    2620 ns/op    3795 B/op    28 allocs/op
BenchmarkParse-16             128370    9294 ns/op    3902 B/op    18 allocs/op
BenchmarkParseP-16            575752    2340 ns/op    3903 B/op    18 allocs/op
BenchmarkSchemaParse-16       212048    5779 ns/op    3968 B/op    57 allocs/op
BenchmarkSchemaParseP-16      630918    1686 ns/op    3968 B/op    57 allocs/op
PASS
ok      github.com/dosco/super-graph/core/internal/qcode    7.710s
@ -1,13 +0,0 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/core/internal/qcode
BenchmarkQCompile-16             118282    9686 ns/op     4031 B/op    30 allocs/op
BenchmarkQCompileP-16            427531    2710 ns/op     4077 B/op    30 allocs/op
BenchmarkQCompileFragment-16     140588    8328 ns/op     8903 B/op    13 allocs/op
BenchmarkParse-16                131396    9212 ns/op     4175 B/op    18 allocs/op
BenchmarkParseP-16               503778    2310 ns/op     4176 B/op    18 allocs/op
BenchmarkParseFragment-16        143725    8158 ns/op    10193 B/op     9 allocs/op
BenchmarkSchemaParse-16          240609    5060 ns/op     3968 B/op    57 allocs/op
BenchmarkSchemaParseP-16         785116    1534 ns/op     3968 B/op    57 allocs/op
PASS
ok      github.com/dosco/super-graph/core/internal/qcode    11.092s
@ -1,17 +0,0 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/core/internal/qcode
BenchmarkQCompile
BenchmarkQCompile-16          129614    8649 ns/op    3756 B/op    28 allocs/op
BenchmarkQCompileP
BenchmarkQCompileP-16         487488    2525 ns/op    3792 B/op    28 allocs/op
BenchmarkParse
BenchmarkParse-16             127582    8731 ns/op    3902 B/op    18 allocs/op
BenchmarkParseP
BenchmarkParseP-16            561373    2223 ns/op    3903 B/op    18 allocs/op
BenchmarkSchemaParse
BenchmarkSchemaParse-16       209142    5523 ns/op    3968 B/op    57 allocs/op
BenchmarkSchemaParseP
BenchmarkSchemaParseP-16      716437    1734 ns/op    3968 B/op    57 allocs/op
PASS
ok      github.com/dosco/super-graph/core/internal/qcode    8.483s
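The three benchmark logs above read like `go test -bench . -benchmem` runs of the qcode package captured at different points in its history; the `-16` suffix on each benchmark name is the GOMAXPROCS value for the run, and the third log's standalone name lines are the output style of newer Go toolchains, which print the benchmark name before its result line.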
@ -1,2 +0,0 @@
#!/bin/sh
cd corpus && rm -rf $(find . ! -name '00?.gql')
@ -1,133 +0,0 @@
package qcode

import (
	"sort"
	"strings"
)

type Config struct {
	DefaultBlock bool
	Blocklist    []string
}

type QueryConfig struct {
	Limit            int
	Filters          []string
	Columns          []string
	DisableFunctions bool
	Block            bool
}

type InsertConfig struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   bool
}

type UpdateConfig struct {
	Filters []string
	Columns []string
	Presets map[string]string
	Block   bool
}

type DeleteConfig struct {
	Filters []string
	Columns []string
	Block   bool
}

type TRConfig struct {
	Query  QueryConfig
	Insert InsertConfig
	Update UpdateConfig
	Delete DeleteConfig
}

type trval struct {
	query struct {
		limit   string
		fil     *Exp
		filNU   bool
		cols    map[string]struct{}
		disable struct{ funcs bool }
		block   bool
	}

	insert struct {
		fil    *Exp
		filNU  bool
		cols   map[string]struct{}
		psmap  map[string]string
		pslist []string
		block  bool
	}

	update struct {
		fil    *Exp
		filNU  bool
		cols   map[string]struct{}
		psmap  map[string]string
		pslist []string
		block  bool
	}

	delete struct {
		fil   *Exp
		filNU bool
		cols  map[string]struct{}
		block bool
	}
}

func (trv *trval) allowedColumns(qt QType) map[string]struct{} {
	switch qt {
	case QTQuery:
		return trv.query.cols
	case QTInsert:
		return trv.insert.cols
	case QTUpdate:
		return trv.update.cols
	case QTDelete:
		return trv.delete.cols
	case QTUpsert:
		return trv.insert.cols
	}

	return nil
}

func (trv *trval) filter(qt QType) (*Exp, bool) {
	switch qt {
	case QTQuery:
		return trv.query.fil, trv.query.filNU
	case QTInsert:
		return trv.insert.fil, trv.insert.filNU
	case QTUpdate:
		return trv.update.fil, trv.update.filNU
	case QTDelete:
		return trv.delete.fil, trv.delete.filNU
	case QTUpsert:
		return trv.insert.fil, trv.insert.filNU
	}

	return nil, false
}

func listToMap(list []string) map[string]struct{} {
	m := make(map[string]struct{}, len(list))
	for i := range list {
		m[strings.ToLower(list[i])] = struct{}{}
	}
	return m
}

func mapToList(m map[string]string) []string {
	list := []string{}
	for k := range m {
		list = append(list, strings.ToLower(k))
	}
	sort.Strings(list)
	return list
}
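An in-package sketch of how the two helpers above normalize role configuration: listToMap lower-cases its keys so lookups are case-insensitive, and mapToList returns sorted lower-cased keys. The function and its values are illustrative, not part of the original file:

// configHelpersSketch demonstrates the normalization behaviour above.
func configHelpersSketch() bool {
	cols := listToMap([]string{"ID", "Name"})
	_, ok := cols["name"] // true: keys were lower-cased on insert

	presets := mapToList(map[string]string{"user_id": "$user_id"})
	return ok && presets[0] == "user_id" // keys come back lower-cased and sorted
}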
@ -1,21 +0,0 @@
query {
	products(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# no duplicate prices returned
		distinct: [ price ]

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		name
		price
	}
}
@ -1,14 +0,0 @@
query {
	products(
		where: {
			or: {
				not: { id: { is_null: true } },
				price: { gt: 10 },
				price: { lt: 20 }
			} }
	) {
		id
		name
		price
	}
}
@ -1,12 +0,0 @@
query {
	products(
		where: {
			and: {
				not: { id: { is_null: true } },
				price: { gt: 10 }
			}}) {
		id
		name
		price
	}
}
@ -1,20 +0,0 @@
// +build gofuzz

package qcode

// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
	qt := GetQType(string(data))

	if qt > QTUpsert {
		panic("qt > QTUpsert")
	}

	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile(data, "user")
	if err != nil {
		return 0
	}

	return 1
}
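The `gofuzz` build tag above keeps this entrypoint out of regular builds; it compiles only when a fuzzing driver builds the package with that tag. Returning 1 marks the input as interesting for further mutation while 0 marks it uninteresting, following the usual go-fuzz convention.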
@ -1,761 +0,0 @@
package qcode

import (
	"errors"
	"fmt"
	"hash/maphash"
	"sync"
	"unsafe"
)

var (
	errEOT = errors.New("end of tokens")
)

type parserType int32

const (
	maxFields = 1200
	maxArgs   = 25
)

const (
	parserError parserType = iota
	parserEOF
	opQuery
	opMutate
	opSub
	NodeStr
	NodeInt
	NodeFloat
	NodeBool
	NodeObj
	NodeList
	NodeVar
)

type Operation struct {
	Type    parserType
	Name    string
	Args    []Arg
	argsA   [10]Arg
	Fields  []Field
	fieldsA [10]Field
}

var zeroOperation = Operation{}

func (o *Operation) Reset() {
	*o = zeroOperation
}

type Fragment struct {
	Name    string
	On      string
	Fields  []Field
	fieldsA [10]Field
}

var zeroFragment = Fragment{}

func (f *Fragment) Reset() {
	*f = zeroFragment
}

type Field struct {
	ID        int32
	ParentID  int32
	Name      string
	Alias     string
	Args      []Arg
	argsA     [5]Arg
	Children  []int32
	childrenA [5]int32
	Union     bool
}

type Arg struct {
	Name string
	Val  *Node
	df   bool
}

type Node struct {
	Type     parserType
	Name     string
	Val      string
	Parent   *Node
	Children []*Node
	exp      *Exp
}

var zeroNode = Node{}

func (n *Node) Reset() {
	*n = zeroNode
}

type Parser struct {
	frags map[uint64]*Fragment
	h     maphash.Hash
	input []byte // the string being scanned
	pos   int
	items []item
	err   error
}

var nodePool = sync.Pool{
	New: func() interface{} { return new(Node) },
}

var opPool = sync.Pool{
	New: func() interface{} { return new(Operation) },
}

var fragPool = sync.Pool{
	New: func() interface{} { return new(Fragment) },
}

var lexPool = sync.Pool{
	New: func() interface{} { return new(lexer) },
}

func Parse(gql []byte) (*Operation, error) {
	var err error

	if len(gql) == 0 {
		return nil, errors.New("blank query")
	}

	l := lexPool.Get().(*lexer)
	l.Reset()
	defer lexPool.Put(l)

	if err = lex(l, gql); err != nil {
		return nil, err
	}

	p := &Parser{
		input: l.input,
		pos:   -1,
		items: l.items,
	}

	op := opPool.Get().(*Operation)
	op.Reset()
	op.Fields = op.fieldsA[:0]

	s := -1
	qf := false

	for {
		if p.peek(itemEOF) {
			p.ignore()
			break
		}

		if p.peek(itemFragment) {
			p.ignore()
			if f, err := p.parseFragment(); err != nil {
				fragPool.Put(f)
				return nil, err
			}

		} else {
			if !qf && p.peek(itemQuery, itemMutation, itemSub, itemObjOpen) {
				s = p.pos
				qf = true
			}
			p.ignore()
		}
	}

	p.reset(s)
	if err := p.parseOp(op); err != nil {
		return nil, err
	}

	for _, v := range p.frags {
		fragPool.Put(v)
	}

	return op, nil
}

func (p *Parser) parseFragment() (*Fragment, error) {
	var err error

	frag := fragPool.Get().(*Fragment)
	frag.Reset()
	frag.Fields = frag.fieldsA[:0]

	if p.peek(itemName) {
		frag.Name = p.val(p.next())
	} else {
		return frag, errors.New("fragment: missing name")
	}

	if p.peek(itemOn) {
		p.ignore()
	} else {
		return frag, errors.New("fragment: missing 'on' keyword")
	}

	if p.peek(itemName) {
		frag.On = p.vall(p.next())
	} else {
		return frag, errors.New("fragment: missing table name after 'on' keyword")
	}

	if p.peek(itemObjOpen) {
		p.ignore()
	} else {
		return frag, fmt.Errorf("fragment: expecting a '{', got: %s", p.next())
	}

	frag.Fields, err = p.parseFields(frag.Fields)
	if err != nil {
		return frag, fmt.Errorf("fragment: %v", err)
	}

	if p.frags == nil {
		p.frags = make(map[uint64]*Fragment)
	}

	_, _ = p.h.WriteString(frag.Name)
	k := p.h.Sum64()
	p.h.Reset()

	p.frags[k] = frag

	return frag, nil
}

func (p *Parser) parseOp(op *Operation) error {
	var err error
	var typeSet bool

	if p.peek(itemQuery, itemMutation, itemSub) {
		err = p.parseOpTypeAndArgs(op)

		if err != nil {
			return fmt.Errorf("%s: %v", op.Type, err)
		}
		typeSet = true
	}

	if p.peek(itemObjOpen) {
		p.ignore()
		if !typeSet {
			op.Type = opQuery
		}

		for {
			if p.peek(itemEOF, itemFragment) {
				p.ignore()
				break
			}

			op.Fields, err = p.parseFields(op.Fields)
			if err != nil {
				return fmt.Errorf("%s: %v", op.Type, err)
			}
		}
	} else {
		return fmt.Errorf("expecting a query, mutation or subscription, got: %s", p.next())
	}

	return nil
}

func (p *Parser) parseOpTypeAndArgs(op *Operation) error {
	item := p.next()

	switch item._type {
	case itemQuery:
		op.Type = opQuery
	case itemMutation:
		op.Type = opMutate
	case itemSub:
		op.Type = opSub
	}

	op.Args = op.argsA[:0]

	var err error

	if p.peek(itemName) {
		op.Name = p.val(p.next())
	}

	if p.peek(itemArgsOpen) {
		p.ignore()

		op.Args, err = p.parseOpParams(op.Args)
		if err != nil {
			return err
		}
	}

	return nil
}

func ParseArgValue(argVal string) (*Node, error) {
	l := lexPool.Get().(*lexer)
	l.Reset()

	if err := lex(l, []byte(argVal)); err != nil {
		return nil, err
	}

	p := &Parser{
		input: l.input,
		pos:   -1,
		items: l.items,
	}
	op, err := p.parseValue()
	lexPool.Put(l)

	return op, err
}

func (p *Parser) parseFields(fields []Field) ([]Field, error) {
	var err error
	st := NewStack()

	if !p.peek(itemName, itemSpread) {
		return nil, fmt.Errorf("unexpected token: %s", p.peekNext())
	}

	for {
		if p.peek(itemEOF) {
			p.ignore()
			return nil, errors.New("invalid query")
		}

		if p.peek(itemObjClose) {
			p.ignore()

			if st.Len() != 0 {
				st.Pop()
				continue
			} else {
				break
			}
		}

		if len(fields) >= maxFields {
			return nil, fmt.Errorf("too many fields (max %d)", maxFields)
		}

		isFrag := false

		if p.peek(itemSpread) {
			p.ignore()
			isFrag = true
		}

		if isFrag {
			fields, err = p.parseFragmentFields(st, fields)
		} else {
			fields, err = p.parseNormalFields(st, fields)
		}

		if err != nil {
			return nil, err
		}
	}

	return fields, nil
}

func (p *Parser) parseNormalFields(st *Stack, fields []Field) ([]Field, error) {
	if !p.peek(itemName) {
		return nil, fmt.Errorf("expecting an alias or field name, got: %s", p.next())
	}

	fields = append(fields, Field{ID: int32(len(fields))})

	f := &fields[(len(fields) - 1)]
	f.Args = f.argsA[:0]
	f.Children = f.childrenA[:0]

	// Parse the field
	if err := p.parseField(f); err != nil {
		return nil, err
	}

	if st.Len() == 0 {
		f.ParentID = -1
	} else {
		pid := st.Peek()
		f.ParentID = pid
		fields[pid].Children = append(fields[pid].Children, f.ID)
	}

	// After the first opening curly bracket come the
	// columns or child fields
	if p.peek(itemObjOpen) {
		p.ignore()
		st.Push(f.ID)
	}

	return fields, nil
}

func (p *Parser) parseFragmentFields(st *Stack, fields []Field) ([]Field, error) {
	var err error
	pid := st.Peek()

	if p.peek(itemOn) {
		p.ignore()
		fields[pid].Union = true

		if fields, err = p.parseNormalFields(st, fields); err != nil {
			return nil, err
		}

		// If the parent is a union selector then copy over args from the parent
		// to the first child, which is the root selector for each union type.
		for i := pid + 1; i < int32(len(fields)); i++ {
			f := &fields[i]
			if f.ParentID == pid {
				f.Args = fields[pid].Args
			}
		}

	} else {
		if !p.peek(itemName) {
			return nil, fmt.Errorf("expecting a fragment name, got: %s", p.next())
		}

		name := p.val(p.next())
		_, _ = p.h.WriteString(name)
		id := p.h.Sum64()
		p.h.Reset()

		fr, ok := p.frags[id]
		if !ok {
			return nil, fmt.Errorf("no fragment named '%s' defined", name)
		}
		ff := fr.Fields

		n := int32(len(fields))
		fields = append(fields, ff...)

		for i := 0; i < len(ff); i++ {
			k := (n + int32(i))
			f := &fields[k]
			f.ID = int32(k)

			// If this is the top level, point the parent to the parent of the
			// previous field.
			if f.ParentID == -1 {
				f.ParentID = pid
				if f.ParentID != -1 {
					fields[pid].Children = append(fields[pid].Children, f.ID)
				}
				// Otherwise update the parent id by our new place in this new array
			} else {
				f.ParentID += n
			}

			// Copy over children since fields append is not a deep copy
			f.Children = make([]int32, len(f.Children))
			copy(f.Children, ff[i].Children)

			// Copy over args since args append is not a deep copy
			f.Args = make([]Arg, len(f.Args))
			copy(f.Args, ff[i].Args)

			// Update the child indexes to their new positions
			for j := range f.Children {
				f.Children[j] += n
			}
		}
	}

	return fields, nil
}

func (p *Parser) parseField(f *Field) error {
	var err error
	v := p.next()

	if p.peek(itemColon) {
		p.ignore()

		if p.peek(itemName) {
			f.Alias = p.val(v)
			f.Name = p.vall(p.next())
		} else {
			return errors.New("expecting an aliased field name")
		}
	} else {
		f.Name = p.vall(v)
	}

	if p.peek(itemArgsOpen) {
		p.ignore()
		if f.Args, err = p.parseArgs(f.Args); err != nil {
			return err
		}
	}

	return nil
}

func (p *Parser) parseOpParams(args []Arg) ([]Arg, error) {
	for {
		if len(args) >= maxArgs {
			return nil, fmt.Errorf("too many args (max %d)", maxArgs)
		}

		if p.peek(itemEOF, itemArgsClose) {
			p.ignore()
			break
		}
		p.next()
	}

	return args, nil
}

func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
	var err error

	for {
		if len(args) >= maxArgs {
			return nil, fmt.Errorf("too many args (max %d)", maxArgs)
		}

		if p.peek(itemEOF, itemArgsClose) {
			p.ignore()
			break
		}

		if !p.peek(itemName) {
			return nil, errors.New("expecting an argument name")
		}
		args = append(args, Arg{Name: p.val(p.next())})
		arg := &args[(len(args) - 1)]

		if !p.peek(itemColon) {
			return nil, errors.New("missing ':' after argument name")
		}
		p.ignore()

		arg.Val, err = p.parseValue()
		if err != nil {
			return nil, err
		}
	}
	return args, nil
}

func (p *Parser) parseList() (*Node, error) {
	nodes := []*Node{}

	parent := nodePool.Get().(*Node)
	parent.Reset()

	var ty parserType
	for {
		if p.peek(itemListClose) {
			p.ignore()
			break
		}
		node, err := p.parseValue()
		if err != nil {
			return nil, err
		}
		if ty == 0 {
			ty = node.Type
		} else if ty != node.Type {
			return nil, errors.New("All values in a list must be of the same type")
		}
		node.Parent = parent
		nodes = append(nodes, node)
	}
	if len(nodes) == 0 {
		return nil, errors.New("List cannot be empty")
	}

	parent.Type = NodeList
	parent.Children = nodes

	return parent, nil
}

func (p *Parser) parseObj() (*Node, error) {
	nodes := []*Node{}

	parent := nodePool.Get().(*Node)
	parent.Reset()

	for {
		if p.peek(itemEOF, itemObjClose) {
			p.ignore()
			break
		}

		if !p.peek(itemName) {
			return nil, errors.New("expecting an argument name")
		}
		nodeName := p.val(p.next())

		if !p.peek(itemColon) {
			return nil, errors.New("missing ':' after Field argument name")
		}
		p.ignore()

		node, err := p.parseValue()
		if err != nil {
			return nil, err
		}
		node.Name = nodeName
		node.Parent = parent
		nodes = append(nodes, node)
	}

	parent.Type = NodeObj
	parent.Children = nodes

	return parent, nil
}

func (p *Parser) parseValue() (*Node, error) {
	if p.peek(itemListOpen) {
		p.ignore()
		return p.parseList()
	}

	if p.peek(itemObjOpen) {
		p.ignore()
		return p.parseObj()
	}

	item := p.next()
	node := nodePool.Get().(*Node)
	node.Reset()

	switch item._type {
	case itemIntVal:
		node.Type = NodeInt
	case itemFloatVal:
		node.Type = NodeFloat
	case itemStringVal:
		node.Type = NodeStr
	case itemBoolVal:
		node.Type = NodeBool
	case itemName:
		node.Type = NodeStr
	case itemVariable:
		node.Type = NodeVar
	default:
		return nil, fmt.Errorf("expecting a number, string, object, list or variable as an argument value (not %s)", p.val(p.next()))
	}
	node.Val = p.val(item)

	return node, nil
}

func (p *Parser) val(v item) string {
	return b2s(p.input[v.pos:v.end])
}

func (p *Parser) vall(v item) string {
	lowercase(p.input, v.pos, v.end)
	return b2s(p.input[v.pos:v.end])
}

func (p *Parser) peek(types ...itemType) bool {
	n := p.pos + 1
	l := len(types)
	// if p.items[n]._type == itemEOF {
	// 	return false
	// }

	if n >= len(p.items) {
		return types[0] == itemEOF
	}

	if l == 1 {
		return p.items[n]._type == types[0]
	}

	for i := 0; i < l; i++ {
		if p.items[n]._type == types[i] {
			return true
		}
	}
	return false
}

func (p *Parser) next() item {
	n := p.pos + 1
	if n >= len(p.items) {
		p.err = errEOT
		return item{_type: itemEOF}
	}
	p.pos = n
	return p.items[p.pos]
}

func (p *Parser) ignore() {
	n := p.pos + 1
	if n >= len(p.items) {
		p.err = errEOT
		return
	}
	p.pos = n
}

func (p *Parser) peekNext() string {
	item := p.items[p.pos+1]
	return b2s(p.input[item.pos:item.end])
}

func (p *Parser) reset(to int) {
	p.pos = to
}

func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func (t parserType) String() string {
	var v string

	switch t {
	case parserEOF:
		v = "EOF"
	case parserError:
		v = "error"
	case opQuery:
		v = "query"
	case opMutate:
		v = "mutation"
	case opSub:
		v = "subscription"
	case NodeStr:
		v = "node-string"
	case NodeInt:
		v = "node-int"
	case NodeFloat:
		v = "node-float"
	case NodeBool:
		v = "node-bool"
	case NodeVar:
		v = "node-var"
	case NodeObj:
		v = "node-obj"
	case NodeList:
		v = "node-list"
	}
	return v
}

func FreeNode(n *Node) {
	nodePool.Put(n)
}
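An in-package sketch of driving the parser above with the exported Parse function; the query literal and function name are illustrative:

// parseSketch parses a small query and returns its flattened field list.
// Field IDs index into this slice and ParentID of -1 marks a root field.
func parseSketch() ([]Field, error) {
	op, err := Parse([]byte(`query { users { id email } }`))
	if err != nil {
		return nil, err
	}
	return op.Fields, nil
}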
@ -1,377 +0,0 @@
package qcode

import (
	"errors"
	"testing"

	"github.com/chirino/graphql/schema"
)

func TestCompile1(t *testing.T) {
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"id", "Name"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err = qc.Compile([]byte(`
	query { product(id: 15) {
		id
		name
	} }`), "user")

	if err == nil {
		t.Fatal(errors.New("this should be an error; id must be a variable"))
	}
}

func TestCompile2(t *testing.T) {
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"ID"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err = qc.Compile([]byte(`
	query { product(id: $id) {
		id
		name
	} }`), "user")

	if err != nil {
		t.Fatal(err)
	}
}

func TestCompile3(t *testing.T) {
	qc, _ := NewCompiler(Config{})
	err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{
			Columns: []string{"ID"},
		},
	})
	if err != nil {
		t.Error(err)
	}

	_, err = qc.Compile([]byte(`
	mutation {
		product(id: $test, name: "Test") {
			id
			name
		}
	}`), "user")

	if err != nil {
		t.Fatal(err)
	}
}

func TestInvalidCompile1(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(`#`), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

func TestInvalidCompile2(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(`{u(where:{not:0})}`), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

func TestEmptyCompile(t *testing.T) {
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(``), "user")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

func TestInvalidPostfixCompile(t *testing.T) {
	gql := `mutation
	updateThread {
		thread(update: $data, where: { slug: { eq: $slug } }) {
			slug
			title
			published
			createdAt : created_at
			totalVotes : cached_votes_total
			totalPosts : cached_posts_total
			vote : thread_vote(where: { user_id: { eq: $user_id } }) {
				id
			}
			topics {
				slug
				name
			}
		}
	}
	}}`
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(gql), "anon")

	if err == nil {
		t.Fatal(errors.New("expecting an error"))
	}
}

func TestFragmentsCompile1(t *testing.T) {
	gql := `
	fragment userFields1 on user {
		id
		email
	}

	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}

	fragment userFields2 on user {
		first_name
		last_name
	}
	`
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(gql), "user")

	if err != nil {
		t.Fatal(err)
	}
}

func TestFragmentsCompile2(t *testing.T) {
	gql := `
	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}

	fragment userFields1 on user {
		id
		email
	}

	fragment userFields2 on user {
		first_name
		last_name
	}`
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(gql), "user")

	if err != nil {
		t.Fatal(err)
	}
}

func TestFragmentsCompile3(t *testing.T) {
	gql := `
	fragment userFields1 on user {
		id
		email
	}

	fragment userFields2 on user {
		first_name
		last_name
	}

	query {
		users {
			...userFields2

			created_at
			...userFields1
		}
	}
	`
	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile([]byte(gql), "user")

	if err != nil {
		t.Fatal(err)
	}
}

var gql = []byte(`
	{products(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# no duplicate prices returned
		distinct: [ price ]

		# only items with an id >= 20 and < 28 are returned
		where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
		id
		name
		price
	}}`)

var gqlWithFragments = []byte(`
	fragment userFields1 on user {
		id
		email
		__typename
	}

	query {
		users {
			...userFields2

			created_at
			...userFields1
			__typename
		}
	}

	fragment userFields2 on user {
		first_name
		last_name
		__typename
	}`)

func BenchmarkQCompile(b *testing.B) {
	qcompile, _ := NewCompiler(Config{})

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		_, err := qcompile.Compile(gql, "user")

		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkQCompileP(b *testing.B) {
	qcompile, _ := NewCompiler(Config{})

	b.ResetTimer()
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := qcompile.Compile(gql, "user")

			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

func BenchmarkQCompileFragment(b *testing.B) {
	qcompile, _ := NewCompiler(Config{})

	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		_, err := qcompile.Compile(gqlWithFragments, "user")

		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkParse(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		_, err := Parse(gql)

		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkParseP(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := Parse(gql)

			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

func BenchmarkParseFragment(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		_, err := Parse(gqlWithFragments)

		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkSchemaParse(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		doc := schema.QueryDocument{}
		err := doc.Parse(string(gql))
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkSchemaParseP(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			doc := schema.QueryDocument{}
			err := doc.Parse(string(gql))
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
@ -1,1307 +0,0 @@
package qcode

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"sync"

	"github.com/dosco/super-graph/core/internal/util"
	"github.com/gobuffalo/flect"
)

type QType int
type SType int
type Action int

const (
	maxSelectors = 30
)

const (
	QTUnknown QType = iota
	QTQuery
	QTMutation
	QTInsert
	QTUpdate
	QTDelete
	QTUpsert
)

const (
	STNone SType = iota
	STUnion
	STMember
)

type QCode struct {
	Type      QType
	ActionVar string
	Selects   []Select
	Roots     []int32
	rootsA    [5]int32
}

type Select struct {
	ID         int32
	ParentID   int32
	UParentID  int32
	Type       SType
	Args       map[string]*Node
	Name       string
	FieldName  string
	Cols       []Column
	Where      *Exp
	OrderBy    []*OrderBy
	DistinctOn []string
	Paging     Paging
	Children   []int32
	Functions  bool
	Allowed    map[string]struct{}
	PresetMap  map[string]string
	PresetList []string
	SkipRender bool
}

type Column struct {
	Table     string
	Name      string
	FieldName string
}

type Exp struct {
	Op         ExpOp
	Col        string
	NestedCols []string
	Type       ValType
	Table      string
	Val        string
	ListType   ValType
	ListVal    []string
	Children   []*Exp
	childrenA  [5]*Exp
	doFree     bool
}

var zeroExp = Exp{doFree: true}

func (ex *Exp) Reset() {
	*ex = zeroExp
}

type OrderBy struct {
	Col   string
	Order Order
}

type PagingType int

const (
	PtOffset PagingType = iota
	PtForward
	PtBackward
)

type Paging struct {
	Type    PagingType
	Limit   string
	Offset  string
	Cursor  bool
	NoLimit bool
}

type ExpOp int

const (
	OpNop ExpOp = iota
	OpAnd
	OpOr
	OpNot
	OpEquals
	OpNotEquals
	OpGreaterOrEquals
	OpLesserOrEquals
	OpGreaterThan
	OpLesserThan
	OpIn
	OpNotIn
	OpLike
	OpNotLike
	OpILike
	OpNotILike
	OpSimilar
	OpNotSimilar
	OpContains
	OpContainedIn
	OpHasKey
	OpHasKeyAny
	OpHasKeyAll
	OpIsNull
	OpEqID
	OpTsQuery
	OpFalse
	OpNotDistinct
	OpDistinct
)

type ValType int

const (
	ValStr ValType = iota + 1
	ValInt
	ValFloat
	ValBool
	ValList
	ValVar
	ValNone
	ValRef
)

type AggregrateOp int

const (
	AgCount AggregrateOp = iota + 1
	AgSum
	AgAvg
	AgMax
	AgMin
)

type Order int

const (
	OrderAsc Order = iota + 1
	OrderDesc
	OrderAscNullsFirst
	OrderAscNullsLast
	OrderDescNullsFirst
	OrderDescNullsLast
)

type Compiler struct {
	tr map[string]map[string]*trval
	bl map[string]struct{}

	defBlock bool
}

var expPool = sync.Pool{
	New: func() interface{} { return &Exp{doFree: true} },
}

func NewCompiler(c Config) (*Compiler, error) {
	co := &Compiler{defBlock: c.DefaultBlock}
	co.tr = make(map[string]map[string]*trval)
	co.bl = make(map[string]struct{}, len(c.Blocklist))

	for i := range c.Blocklist {
		co.bl[strings.ToLower(c.Blocklist[i])] = struct{}{}
	}

	seedExp := [100]Exp{}

	for i := range seedExp {
		seedExp[i].doFree = true
		expPool.Put(&seedExp[i])
	}

	return co, nil
}

func NewFilter() *Exp {
	ex := expPool.Get().(*Exp)
	ex.Reset()

	return ex
}

func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
	var err error
	trv := &trval{}

	// query config
	trv.query.fil, trv.query.filNU, err = compileFilter(trc.Query.Filters)
	if err != nil {
		return err
	}
	if trc.Query.Limit > 0 {
		trv.query.limit = strconv.Itoa(trc.Query.Limit)
	}
	trv.query.cols = listToMap(trc.Query.Columns)
	trv.query.disable.funcs = trc.Query.DisableFunctions
	trv.query.block = trc.Query.Block

	// insert config
	trv.insert.fil, trv.insert.filNU, err = compileFilter(trc.Insert.Filters)
	if err != nil {
		return err
	}
	trv.insert.cols = listToMap(trc.Insert.Columns)
	trv.insert.psmap = trc.Insert.Presets
	trv.insert.pslist = mapToList(trv.insert.psmap)
	trv.insert.block = trc.Insert.Block

	// update config
	trv.update.fil, trv.update.filNU, err = compileFilter(trc.Update.Filters)
	if err != nil {
		return err
	}
	trv.update.cols = listToMap(trc.Update.Columns)
	trv.update.psmap = trc.Update.Presets
	trv.update.pslist = mapToList(trv.update.psmap)
	trv.update.block = trc.Update.Block

	// delete config
	trv.delete.fil, trv.delete.filNU, err = compileFilter(trc.Delete.Filters)
	if err != nil {
		return err
	}
	trv.delete.cols = listToMap(trc.Delete.Columns)
	trv.delete.block = trc.Delete.Block

	singular := flect.Singularize(table)
	plural := flect.Pluralize(table)

	if _, ok := com.tr[role]; !ok {
		com.tr[role] = make(map[string]*trval)
	}

	com.tr[role][singular] = trv
	com.tr[role][plural] = trv
	return nil
}
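A sketch of wiring a role and compiling a query with the Compile method defined next, mirroring the tests shown earlier; the column list and function name are illustrative:

// compileSketch registers a role for the product table, then compiles a
// query against it. AddRole stores the config under both the singular
// and plural table names, so "product" and "products" both resolve.
func compileSketch() (*QCode, error) {
	qc, _ := NewCompiler(Config{})
	if err := qc.AddRole("user", "product", TRConfig{
		Query: QueryConfig{Columns: []string{"id", "name"}},
	}); err != nil {
		return nil, err
	}
	return qc.Compile([]byte(`query { product(id: $id) { id name } }`), "user")
}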
func (com *Compiler) Compile(query []byte, role string) (*QCode, error) {
|
||||
var err error
|
||||
|
||||
qc := QCode{Type: QTQuery}
|
||||
qc.Roots = qc.rootsA[:0]
|
||||
|
||||
op, err := Parse(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = com.compileQuery(&qc, op, role); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
freeNodes(op)
|
||||
opPool.Put(op)
|
||||
|
||||
return &qc, nil
|
||||
}
func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
	id := int32(0)

	if len(op.Fields) == 0 {
		return errors.New("invalid GraphQL: no query found")
	}

	if op.Type == opMutate {
		if err := com.setMutationType(qc, op.Fields[0].Args); err != nil {
			return err
		}
	}

	selects := make([]Select, 0, 5)
	st := NewStack()
	action := qc.Type

	for i := range op.Fields {
		if op.Fields[i].ParentID == -1 {
			val := op.Fields[i].ID | (-1 << 16)
			st.Push(val)
		}
	}

	for {
		if st.Len() == 0 {
			break
		}

		if id >= maxSelectors {
			return fmt.Errorf("selector limit reached (%d)", maxSelectors)
		}

		val := st.Pop()
		fid := val & 0xFFFF
		parentID := (val >> 16) & 0xFFFF

		field := &op.Fields[fid]

		if _, ok := com.bl[field.Name]; ok {
			continue
		}

		if field.ParentID == -1 {
			parentID = -1
		}

		trv := com.getRole(role, field.Name)
		skipRender := false

		if trv != nil {
			switch action {
			case QTQuery:
				if trv.query.block {
					skipRender = true
				}

			case QTInsert:
				if trv.insert.block {
					return fmt.Errorf("%s, insert blocked: %s", role, field.Name)
				}

			case QTUpdate:
				if trv.update.block {
					return fmt.Errorf("%s, update blocked: %s", role, field.Name)
				}

			case QTDelete:
				if trv.delete.block {
					return fmt.Errorf("%s, delete blocked: %s", role, field.Name)
				}
			}

		} else if role == "anon" {
			skipRender = com.defBlock
		}

		selects = append(selects, Select{
			ID:         id,
			ParentID:   parentID,
			Name:       field.Name,
			SkipRender: skipRender,
		})
		s := &selects[(len(selects) - 1)]

		if field.Union {
			s.Type = STUnion
		}

		if field.Alias != "" {
			s.FieldName = field.Alias
		} else {
			s.FieldName = s.Name
		}

		if s.ParentID == -1 {
			qc.Roots = append(qc.Roots, s.ID)
		} else {
			p := &selects[s.ParentID]
			p.Children = append(p.Children, s.ID)

			if p.Type == STUnion {
				s.Type = STMember
				s.UParentID = p.ParentID
			}
		}

		if skipRender {
			id++
			continue
		}

		s.Children = make([]int32, 0, 5)
		s.Functions = true

		if trv != nil {
			s.Allowed = trv.allowedColumns(action)

			switch action {
			case QTQuery:
				s.Functions = !trv.query.disable.funcs
				s.Paging.Limit = trv.query.limit

			case QTInsert:
				s.PresetMap = trv.insert.psmap
				s.PresetList = trv.insert.pslist

			case QTUpdate:
				s.PresetMap = trv.update.psmap
				s.PresetList = trv.update.pslist
			}
		}

		err := com.compileArgs(qc, s, field.Args, role)
		if err != nil {
			return err
		}

		// Order is important: AddFilters must come after compileArgs.
		com.AddFilters(qc, s, role)

		s.Cols = make([]Column, 0, len(field.Children))
		cm := make(map[string]struct{})
		action = QTQuery

		for _, cid := range field.Children {
			f := op.Fields[cid]

			if _, ok := com.bl[f.Name]; ok {
				continue
			}

			var fname string

			if f.Alias != "" {
				fname = f.Alias
			} else {
				fname = f.Name
			}

			if _, ok := cm[fname]; ok {
				continue
			} else {
				cm[fname] = struct{}{}
			}

			if len(f.Children) != 0 {
				val := f.ID | (s.ID << 16)
				st.Push(val)
				continue
			}

			col := Column{Name: f.Name, FieldName: fname}
			s.Cols = append(s.Cols, col)
		}

		id++
	}

	if id == 0 {
		return errors.New("invalid query")
	}

	qc.Selects = selects[:id]

	return nil
}
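
// AddFilters applies the role's configured filter for this table to the
// select. A filter that references $user_id can never be satisfied for the
// anon role, so the select is marked SkipRender instead of failing.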
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
	var fil *Exp
	var nu bool // need user_id (or not) in this filter

	if trv, ok := com.tr[role][sel.Name]; ok {
		fil, nu = trv.filter(qc.Type)
	}

	if fil == nil {
		return
	}

	if nu && role == "anon" {
		sel.SkipRender = true
	}

	switch fil.Op {
	case OpNop:
	case OpFalse:
		sel.Where = fil
	default:
		AddFilter(sel, fil)
	}
}

func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg, role string) error {
	var err error

	for i := range args {
		arg := &args[i]

		switch arg.Name {
		case "id":
			err = com.compileArgID(sel, arg)

		case "search":
			err = com.compileArgSearch(sel, arg)

		case "where":
			err = com.compileArgWhere(sel, arg, role)

		case "orderby", "order_by", "order":
			err = com.compileArgOrderBy(sel, arg)

		case "distinct_on", "distinct":
			err = com.compileArgDistinctOn(sel, arg)

		case "limit":
			err = com.compileArgLimit(sel, arg)

		case "offset":
			err = com.compileArgOffset(sel, arg)

		case "first":
			err = com.compileArgFirstLast(sel, arg, PtForward)

		case "last":
			err = com.compileArgFirstLast(sel, arg, PtBackward)

		case "after":
			err = com.compileArgAfterBefore(sel, arg, PtForward)

		case "before":
			err = com.compileArgAfterBefore(sel, arg, PtBackward)
		}

		if err != nil {
			return err
		}
	}

	return nil
}
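
// setMutationType decides the concrete mutation type from the root field's
// arguments: insert, update and upsert each take a named variable that is
// recorded as the action variable, while delete takes a boolean (a literal
// false downgrades the operation to a plain query).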
func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
	setActionVar := func(arg *Arg) error {
		if arg.Val.Type != NodeVar {
			return argErr(arg.Name, "variable")
		}
		qc.ActionVar = arg.Val.Val
		return nil
	}

	for i := range args {
		arg := &args[i]

		switch arg.Name {
		case "insert":
			qc.Type = QTInsert
			return setActionVar(arg)
		case "update":
			qc.Type = QTUpdate
			return setActionVar(arg)
		case "upsert":
			qc.Type = QTUpsert
			return setActionVar(arg)
		case "delete":
			qc.Type = QTDelete

			if arg.Val.Type != NodeBool {
				return argErr(arg.Name, "boolean")
			}

			if arg.Val.Val == "false" {
				qc.Type = QTQuery
			}
			return nil
		}
	}

	return nil
}

func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, bool, error) {
	if arg.Val.Type != NodeObj {
		return nil, false, fmt.Errorf("expecting an object")
	}

	return com.compileArgNode(st, arg.Val, true)
}

func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, bool, error) {
	var root *Exp
	var needsUser bool

	if node == nil || len(node.Children) == 0 {
		return nil, false, errors.New("invalid argument value")
	}

	pushChild(st, nil, node)

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()

		node, ok := intf.(*Node)
		if !ok || node == nil {
			return nil, needsUser, fmt.Errorf("16: unexpected value %v (%T)", intf, intf)
		}

		// Objects inside a list
		if node.Name == "" {
			pushChildren(st, node.exp, node)
			continue

		} else if _, ok := com.bl[node.Name]; ok {
			continue
		}

		ex, err := newExp(st, node, usePool)
		if err != nil {
			return nil, needsUser, err
		}

		if ex == nil {
			continue
		}

		if ex.Type == ValVar && ex.Val == "user_id" {
			needsUser = true
		}

		if node.exp == nil {
			root = ex
		} else {
			node.exp.Children = append(node.exp.Children, ex)
		}
	}

	return root, needsUser, nil
}

func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
	if sel.ID != 0 {
		return nil
	}

	if sel.Where != nil && sel.Where.Op == OpEqID {
		return nil
	}

	if arg.Val.Type != NodeVar {
		return argErr("id", "variable")
	}

	ex := expPool.Get().(*Exp)
	ex.Reset()

	ex.Op = OpEqID
	ex.Type = ValVar
	ex.Val = arg.Val.Val

	sel.Where = ex
	return nil
}

func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
	if arg.Val.Type != NodeVar {
		return argErr("search", "variable")
	}

	ex := expPool.Get().(*Exp)
	ex.Reset()

	ex.Op = OpTsQuery
	ex.Type = ValVar
	ex.Val = arg.Val.Val

	if sel.Args == nil {
		sel.Args = make(map[string]*Node)
	}

	sel.Args[arg.Name] = arg.Val
	arg.df = true
	AddFilter(sel, ex)

	return nil
}

func (com *Compiler) compileArgWhere(sel *Select, arg *Arg, role string) error {
	st := util.NewStack()

	ex, nu, err := com.compileArgObj(st, arg)
	if err != nil {
		return err
	}

	if nu && role == "anon" {
		sel.SkipRender = true
	}
	AddFilter(sel, ex)

	return nil
}

func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
	if arg.Val.Type != NodeObj {
		return fmt.Errorf("expecting an object")
	}

	st := util.NewStack()

	for i := range arg.Val.Children {
		st.Push(arg.Val.Children[i])
	}

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()
		node, ok := intf.(*Node)

		if !ok || node == nil {
			return fmt.Errorf("17: unexpected value %v (%T)", intf, intf)
		}

		if _, ok := com.bl[node.Name]; ok {
			continue
		}

		if node.Type != NodeStr && node.Type != NodeVar {
			return fmt.Errorf("expecting a string or variable")
		}

		ob := &OrderBy{}

		switch node.Val {
		case "asc":
			ob.Order = OrderAsc
		case "desc":
			ob.Order = OrderDesc
		case "asc_nulls_first":
			ob.Order = OrderAscNullsFirst
		case "desc_nulls_first":
			ob.Order = OrderDescNullsFirst
		case "asc_nulls_last":
			ob.Order = OrderAscNullsLast
		case "desc_nulls_last":
			ob.Order = OrderDescNullsLast
		default:
			return fmt.Errorf("valid values include asc, desc, asc_nulls_first, desc_nulls_first, asc_nulls_last and desc_nulls_last")
		}

		setOrderByColName(ob, node)
		sel.OrderBy = append(sel.OrderBy, ob)
	}
	return nil
}

func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) error {
	node := arg.Val

	if _, ok := com.bl[node.Name]; ok {
		return nil
	}

	if node.Type != NodeList && node.Type != NodeStr {
		return fmt.Errorf("expecting a list of strings or just a string")
	}

	if node.Type == NodeStr {
		sel.DistinctOn = append(sel.DistinctOn, node.Val)
	}

	for i := range node.Children {
		sel.DistinctOn = append(sel.DistinctOn, node.Children[i].Val)
	}

	return nil
}

func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) error {
	node := arg.Val

	if node.Type != NodeInt {
		return argErr("limit", "number")
	}

	sel.Paging.Limit = node.Val

	return nil
}

func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) error {
	node := arg.Val

	if node.Type != NodeVar {
		return argErr("offset", "variable")
	}

	sel.Paging.Offset = node.Val
	return nil
}

func (com *Compiler) compileArgFirstLast(sel *Select, arg *Arg, pt PagingType) error {
	node := arg.Val

	if node.Type != NodeInt {
		return argErr(arg.Name, "number")
	}

	sel.Paging.Type = pt
	sel.Paging.Limit = node.Val

	return nil
}

func (com *Compiler) compileArgAfterBefore(sel *Select, arg *Arg, pt PagingType) error {
	node := arg.Val

	if node.Type != NodeVar || node.Val != "cursor" {
		return fmt.Errorf("value for argument '%s' must be a variable named $cursor", arg.Name)
	}
	sel.Paging.Type = pt
	sel.Paging.Cursor = true

	return nil
}

// var zeroTrv = &trval{}

func (com *Compiler) getRole(role, field string) *trval {
	if trv, ok := com.tr[role][field]; ok {
		return trv
	}

	return nil
	// } else {
	// 	return zeroTrv
	// }
}
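
// AddFilter merges a filter into the select's existing where clause. The two
// expressions are joined under an AND node; when the current root is already
// a pooled AND node the filter is simply appended to its children.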
func AddFilter(sel *Select, fil *Exp) {
	if sel.Where != nil {
		ow := sel.Where

		if sel.Where.Op != OpAnd || !sel.Where.doFree {
			sel.Where = expPool.Get().(*Exp)
			sel.Where.Reset()
			sel.Where.Op = OpAnd
			sel.Where.Children = sel.Where.childrenA[:2]
			sel.Where.Children[0] = fil
			sel.Where.Children[1] = ow

		} else {
			sel.Where.Children = append(sel.Where.Children, fil)
		}

	} else {
		sel.Where = fil
	}
}
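
// newExp converts a single argument node into a filter expression. The
// boolean operators (and, or, not) only push their child nodes back onto the
// stack; leaf operators capture the node's value and type. An unknown name
// with children is treated as a nested column object and skipped here.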
func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
	name := node.Name
	if name[0] == '_' {
		name = name[1:]
	}

	var ex *Exp

	if usePool {
		ex = expPool.Get().(*Exp)
		ex.Reset()
	} else {
		ex = &Exp{doFree: false}
	}

	ex.Children = ex.childrenA[:0]

	switch name {
	case "and":
		if len(node.Children) == 0 {
			return nil, errors.New("missing expression after 'AND' operator")
		}
		ex.Op = OpAnd
		pushChildren(st, ex, node)
	case "or":
		if len(node.Children) == 0 {
			return nil, errors.New("missing expression after 'OR' operator")
		}
		ex.Op = OpOr
		pushChildren(st, ex, node)
	case "not":
		if len(node.Children) == 0 {
			return nil, errors.New("missing expression after 'NOT' operator")
		}
		ex.Op = OpNot
		pushChild(st, ex, node)
	case "eq", "equals":
		ex.Op = OpEquals
		ex.Val = node.Val
	case "neq", "not_equals":
		ex.Op = OpNotEquals
		ex.Val = node.Val
	case "gt", "greater_than":
		ex.Op = OpGreaterThan
		ex.Val = node.Val
	case "lt", "lesser_than":
		ex.Op = OpLesserThan
		ex.Val = node.Val
	case "gte", "greater_or_equals":
		ex.Op = OpGreaterOrEquals
		ex.Val = node.Val
	case "lte", "lesser_or_equals":
		ex.Op = OpLesserOrEquals
		ex.Val = node.Val
	case "in":
		ex.Op = OpIn
		setListVal(ex, node)
	case "nin", "not_in":
		ex.Op = OpNotIn
		setListVal(ex, node)
	case "like":
		ex.Op = OpLike
		ex.Val = node.Val
	case "nlike", "not_like":
		ex.Op = OpNotLike
		ex.Val = node.Val
	case "ilike":
		ex.Op = OpILike
		ex.Val = node.Val
	case "nilike", "not_ilike":
		ex.Op = OpNotILike
		ex.Val = node.Val
	case "similar":
		ex.Op = OpSimilar
		ex.Val = node.Val
	case "nsimilar", "not_similar":
		ex.Op = OpNotSimilar
		ex.Val = node.Val
	case "contains":
		ex.Op = OpContains
		ex.Val = node.Val
	case "contained_in":
		ex.Op = OpContainedIn
		ex.Val = node.Val
	case "has_key":
		ex.Op = OpHasKey
		ex.Val = node.Val
	case "has_key_any":
		ex.Op = OpHasKeyAny
		ex.Val = node.Val
	case "has_key_all":
		ex.Op = OpHasKeyAll
		ex.Val = node.Val
	case "is_null":
		ex.Op = OpIsNull
		ex.Val = node.Val
	case "null_eq", "ndis", "not_distinct":
		ex.Op = OpNotDistinct
		ex.Val = node.Val
	case "null_neq", "dis", "distinct":
		ex.Op = OpDistinct
		ex.Val = node.Val
	default:
		if len(node.Children) == 0 {
			return nil, fmt.Errorf("[Where] invalid operation: %s", name)
		}
		pushChildren(st, node.exp, node)
		return nil, nil // skip node
	}

	if ex.Op != OpAnd && ex.Op != OpOr && ex.Op != OpNot {
		switch node.Type {
		case NodeStr:
			ex.Type = ValStr
		case NodeInt:
			ex.Type = ValInt
		case NodeBool:
			ex.Type = ValBool
		case NodeFloat:
			ex.Type = ValFloat
		case NodeList:
			ex.Type = ValList
		case NodeVar:
			ex.Type = ValVar
		default:
			return nil, fmt.Errorf("[Where] invalid values for: %s", name)
		}

		setWhereColName(ex, node)
	}

	return ex, nil
}

func setListVal(ex *Exp, node *Node) {
	if len(node.Children) != 0 {
		switch node.Children[0].Type {
		case NodeStr:
			ex.ListType = ValStr
		case NodeInt:
			ex.ListType = ValInt
		case NodeBool:
			ex.ListType = ValBool
		case NodeFloat:
			ex.ListType = ValFloat
		}
	} else {
		ex.Val = node.Val
		return
	}

	for i := range node.Children {
		ex.ListVal = append(ex.ListVal, node.Children[i].Val)
	}
}

func setWhereColName(ex *Exp, node *Node) {
	var list []string

	for n := node.Parent; n != nil; n = n.Parent {
		if n.Type != NodeObj {
			continue
		}
		if n.Name != "" {
			k := n.Name
			if k == "and" || k == "or" || k == "not" ||
				k == "_and" || k == "_or" || k == "_not" {
				continue
			}
			list = append([]string{k}, list...)
		}
	}
	listlen := len(list)

	if listlen == 1 {
		ex.Col = list[0]
	} else if listlen > 1 {
		ex.Col = list[listlen-1]
		ex.NestedCols = list[:listlen]
	}
}

func setOrderByColName(ob *OrderBy, node *Node) {
	var list []string

	for n := node; n != nil; n = n.Parent {
		if len(n.Name) != 0 {
			list = append([]string{n.Name}, list...)
		}
	}
	if len(list) != 0 {
		ob.Col = buildPath(list)
	}
}

func pushChildren(st *util.Stack, exp *Exp, node *Node) {
	for i := range node.Children {
		node.Children[i].exp = exp
		st.Push(node.Children[i])
	}
}

func pushChild(st *util.Stack, exp *Exp, node *Node) {
	node.Children[0].exp = exp
	st.Push(node.Children[0])
}

func compileFilter(filter []string) (*Exp, bool, error) {
	var fl *Exp
	var needsUser bool

	com := &Compiler{}
	st := util.NewStack()

	if len(filter) == 0 {
		return &Exp{Op: OpNop, doFree: false}, false, nil
	}

	for i := range filter {
		if filter[i] == "false" {
			return &Exp{Op: OpFalse, doFree: false}, false, nil
		}

		node, err := ParseArgValue(filter[i])
		if err != nil {
			return nil, false, err
		}
		f, nu, err := com.compileArgNode(st, node, false)
		if err != nil {
			return nil, false, err
		}
		if nu {
			needsUser = true
		}

		// TODO: Invalid table names in a nested where fail silently,
		// returning a nil 'f'; this needs to be fixed.

		// TODO: Invalid where clauses, such as a missing op (eg. eq), also fail silently.

		if fl == nil {
			fl = f
		} else {
			fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}, doFree: false}
		}
	}
	return fl, needsUser, nil
}
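
// buildPath joins the parts into a dot-separated path such as "a.b.c". The
// builder is pre-sized to the exact output length (part lengths plus
// separators) so the join allocates only once.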
func buildPath(a []string) string {
	switch len(a) {
	case 0:
		return ""
	case 1:
		return a[0]
	}

	n := len(a) - 1
	for i := 0; i < len(a); i++ {
		n += len(a[i])
	}

	var b strings.Builder
	b.Grow(n)
	b.WriteString(a[0])
	for _, s := range a[1:] {
		b.WriteRune('.')
		b.WriteString(s)
	}
	return b.String()
}

func (t ExpOp) String() string {
	var v string

	switch t {
	case OpNop:
		v = "op-nop"
	case OpAnd:
		v = "op-and"
	case OpOr:
		v = "op-or"
	case OpNot:
		v = "op-not"
	case OpEquals:
		v = "op-equals"
	case OpNotEquals:
		v = "op-not-equals"
	case OpGreaterOrEquals:
		v = "op-greater-or-equals"
	case OpLesserOrEquals:
		v = "op-lesser-or-equals"
	case OpGreaterThan:
		v = "op-greater-than"
	case OpLesserThan:
		v = "op-lesser-than"
	case OpIn:
		v = "op-in"
	case OpNotIn:
		v = "op-not-in"
	case OpLike:
		v = "op-like"
	case OpNotLike:
		v = "op-not-like"
	case OpILike:
		v = "op-i-like"
	case OpNotILike:
		v = "op-not-i-like"
	case OpSimilar:
		v = "op-similar"
	case OpNotSimilar:
		v = "op-not-similar"
	case OpContains:
		v = "op-contains"
	case OpContainedIn:
		v = "op-contained-in"
	case OpHasKey:
		v = "op-has-key"
	case OpHasKeyAny:
		v = "op-has-key-any"
	case OpHasKeyAll:
		v = "op-has-key-all"
	case OpIsNull:
		v = "op-is-null"
	case OpEqID:
		v = "op-eq-id"
	case OpTsQuery:
		v = "op-ts-query"
	}
	return fmt.Sprintf("<%s>", v)
}

func FreeExp(ex *Exp) {
	if ex.doFree {
		expPool.Put(ex)
	}
}

func argErr(name, ty string) error {
	return fmt.Errorf("value for argument '%s' must be a %s", name, ty)
}
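
// freeNodes returns the parsed argument nodes to the node pool once a query
// has been compiled. Arguments flagged df are skipped because their nodes are
// still referenced by the compiled query (see compileArgSearch); the fm map
// guards against returning the same node twice.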
func freeNodes(op *Operation) {
	var st *util.Stack
	fm := make(map[*Node]struct{})

	for i := range op.Args {
		arg := op.Args[i]
		if arg.df {
			continue
		}

		for i := range arg.Val.Children {
			if st == nil {
				st = util.NewStack()
			}
			c := arg.Val.Children[i]
			if _, ok := fm[c]; !ok {
				st.Push(c)
			}
		}

		if _, ok := fm[arg.Val]; !ok {
			nodePool.Put(arg.Val)
			fm[arg.Val] = struct{}{}
		}
	}

	for i := range op.Fields {
		f := op.Fields[i]

		for j := range f.Args {
			arg := f.Args[j]
			if arg.df {
				continue
			}

			for k := range arg.Val.Children {
				if st == nil {
					st = util.NewStack()
				}
				c := arg.Val.Children[k]
				if _, ok := fm[c]; !ok {
					st.Push(c)
				}
			}

			if _, ok := fm[arg.Val]; !ok {
				nodePool.Put(arg.Val)
				fm[arg.Val] = struct{}{}
			}
		}
	}

	if st == nil {
		return
	}

	for {
		if st.Len() == 0 {
			break
		}
		intf := st.Pop()
		node, ok := intf.(*Node)
		if !ok || node == nil {
			continue
		}

		for i := range node.Children {
			st.Push(node.Children[i])
		}

		if _, ok := fm[node]; !ok {
			nodePool.Put(node)
			fm[node] = struct{}{}
		}
	}
}
@ -1,49 +0,0 @@
package qcode

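// GetQType peeks at the raw GraphQL text to classify it as a query or a
// mutation without parsing it: it scans for the first significant character,
// skipping #-comments, and treats a bare leading '{' as a query.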
func GetQType(gql string) QType {
	ic := false
	for i := range gql {
		b := gql[i]
		switch {
		case b == '#':
			ic = true
		case b == '\n':
			ic = false
		case !ic && b == '{':
			return QTQuery
		case !ic && al(b):
			switch b {
			case 'm', 'M':
				return QTMutation
			case 'q', 'Q':
				return QTQuery
			}
		}
	}
	return -1
}

func al(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}

func (qt QType) String() string {
	switch qt {
	case QTUnknown:
		return "unknown"
	case QTQuery:
		return "query"
	case QTMutation:
		return "mutation"
	case QTInsert:
		return "insert"
	case QTUpdate:
		return "update"
	case QTDelete:
		return "delete"
	case QTUpsert:
		return "upsert"
	}

	return ""
}
@ -1,50 +0,0 @@
package qcode

import "testing"

func TestGetQType(t *testing.T) {
	type args struct {
		gql string
	}
	type ts struct {
		name string
		args args
		want QType
	}
	tests := []ts{
		{
			name: "query",
			args: args{gql: " query {"},
			want: QTQuery,
		},
		{
			name: "mutation",
			args: args{gql: " mutation {"},
			want: QTMutation,
		},
		{
			name: "default query",
			args: args{gql: " {"},
			want: QTQuery,
		},
		{
			name: "default query with comment",
			args: args{gql: `# query is good
			{`},
			want: QTQuery,
		},
		{
			name: "failed query with comment",
			args: args{gql: `# query is good query {`},
			want: -1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := GetQType(tt.args.gql); got != tt.want {
				t.Errorf("GetQType() = %v, want %v", got, tt.want)
			}
		})
	}
}
@ -1,490 +0,0 @@
package core

import (
	"strings"

	"github.com/chirino/graphql"
	"github.com/chirino/graphql/resolvers"
	"github.com/chirino/graphql/schema"
	"github.com/dosco/super-graph/core/internal/psql"
)

var typeMap map[string]string = map[string]string{
	"smallint":         "Int",
	"integer":          "Int",
	"bigint":           "Int",
	"smallserial":      "Int",
	"serial":           "Int",
	"bigserial":        "Int",
	"decimal":          "Float",
	"numeric":          "Float",
	"real":             "Float",
	"double precision": "Float",
	"money":            "Float",
	"boolean":          "Boolean",
}

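// initGraphQLEgine builds the engine's GraphQL schema from the discovered
// database schema: for every table it derives Output, Input, OrderBy and
// Expression types, plus singular and plural Query and Mutation fields.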
func (sg *SuperGraph) initGraphQLEgine() error {
	engine := graphql.New()
	engineSchema := engine.Schema
	dbSchema := sg.schema

	if err := engineSchema.Parse(`enum OrderDirection { asc desc }`); err != nil {
		return err
	}

	gqltype := func(col psql.DBColumn) schema.Type {
		typeName := typeMap[strings.ToLower(col.Type)]
		if typeName == "" {
			typeName = "String"
		}
		var t schema.Type = &schema.TypeName{Name: typeName}
		if col.NotNull {
			t = &schema.NonNull{OfType: t}
		}
		return t
	}

	query := &schema.Object{
		Name:   "Query",
		Fields: schema.FieldList{},
	}
	mutation := &schema.Object{
		Name:   "Mutation",
		Fields: schema.FieldList{},
	}
	engineSchema.Types[query.Name] = query
	engineSchema.Types[mutation.Name] = mutation
	engineSchema.EntryPoints[schema.Query] = query
	engineSchema.EntryPoints[schema.Mutation] = mutation

	//validGraphQLIdentifierRegex := regexp.MustCompile(`^[A-Za-z_][A-Za-z_0-9]*$`)

	scalarExpressionTypesNeeded := map[string]bool{}
	tableNames := dbSchema.GetTableNames()
	funcs := dbSchema.GetFunctions()

	for _, table := range tableNames {
		ti, err := dbSchema.GetTable(table)
		if err != nil {
			return err
		}

		if !ti.IsSingular {
			continue
		}

		singularName := ti.Singular
		// if !validGraphQLIdentifierRegex.MatchString(singularName) {
		// 	return errors.New("table name is not a valid GraphQL identifier: " + singularName)
		// }
		pluralName := ti.Plural
		// if !validGraphQLIdentifierRegex.MatchString(pluralName) {
		// 	return errors.New("table name is not a valid GraphQL identifier: " + pluralName)
		// }

		outputType := &schema.Object{
			Name:   singularName + "Output",
			Fields: schema.FieldList{},
		}
		engineSchema.Types[outputType.Name] = outputType

		inputType := &schema.InputObject{
			Name:   singularName + "Input",
			Fields: schema.InputValueList{},
		}
		engineSchema.Types[inputType.Name] = inputType

		orderByType := &schema.InputObject{
			Name:   singularName + "OrderBy",
			Fields: schema.InputValueList{},
		}
		engineSchema.Types[orderByType.Name] = orderByType

		expressionTypeName := singularName + "Expression"
		expressionType := &schema.InputObject{
			Name: expressionTypeName,
			Fields: schema.InputValueList{
				&schema.InputValue{
					Name: "and",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
				},
				&schema.InputValue{
					Name: "or",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
				},
				&schema.InputValue{
					Name: "not",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionTypeName}},
				},
			},
		}
		engineSchema.Types[expressionType.Name] = expressionType

		for _, col := range ti.Columns {
			colName := col.Name
			// if !validGraphQLIdentifierRegex.MatchString(colName) {
			// 	return errors.New("column name is not a valid GraphQL identifier: " + colName)
			// }

			colType := gqltype(col)
			nullableColType := ""
			if x, ok := colType.(*schema.NonNull); ok {
				nullableColType = x.OfType.(*schema.TypeName).Name
			} else {
				nullableColType = colType.(*schema.TypeName).Name
			}

			outputType.Fields = append(outputType.Fields, &schema.Field{
				Name: colName,
				Type: colType,
			})

			for _, f := range funcs {
				if col.Type != f.Params[0].Type {
					continue
				}
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: f.Name + "_" + colName,
					Type: colType,
				})
			}

			// If it's a numeric type, add the aggregate fields.
			if nullableColType == "Float" || nullableColType == "Int" {
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "avg_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "count_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "max_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "min_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_pop_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "stddev_samp_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "variance_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "var_pop_" + colName,
					Type: colType,
				})
				outputType.Fields = append(outputType.Fields, &schema.Field{
					Name: "var_samp_" + colName,
					Type: colType,
				})
			}

			inputType.Fields = append(inputType.Fields, &schema.InputValue{
				Name: colName,
				Type: colType,
			})
			orderByType.Fields = append(orderByType.Fields, &schema.InputValue{
				Name: colName,
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "OrderDirection"}},
			})

			scalarExpressionTypesNeeded[nullableColType] = true

			expressionType.Fields = append(expressionType.Fields, &schema.InputValue{
				Name: colName,
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: nullableColType + "Expression"}},
			})
		}

		outputTypeName := &schema.TypeName{Name: outputType.Name}
		inputTypeName := &schema.TypeName{Name: inputType.Name}
		pluralOutputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: outputType.Name}}}}
		pluralInputTypeName := &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: inputType.Name}}}}

		args := schema.InputValueList{
			&schema.InputValue{
				Desc: schema.Description{Text: "To sort or order results use the order_by argument. It can be combined with where, search, etc. to build complex queries to fit your needs."},
				Name: "order_by",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: orderByType.Name}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "where",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: expressionType.Name}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "limit",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "offset",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "first",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "last",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Int"}},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "before",
				Type: &schema.TypeName{Name: "String"},
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "after",
				Type: &schema.TypeName{Name: "String"},
			},
		}
		if ti.PrimaryCol != nil {
			t := gqltype(*ti.PrimaryCol)
			if _, ok := t.(*schema.NonNull); !ok {
				t = &schema.NonNull{OfType: t}
			}
			args = append(args, &schema.InputValue{
				Desc: schema.Description{Text: "Finds the record by the primary key"},
				Name: "id",
				Type: t,
			})
		}

		if ti.TSVCol != nil {
			args = append(args, &schema.InputValue{
				Desc: schema.Description{Text: "Performs full text search using a TSV index"},
				Name: "search",
				Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
			})
		}

		query.Fields = append(query.Fields, &schema.Field{
			Desc: schema.Description{Text: ""},
			Name: singularName,
			Type: outputTypeName,
			Args: args,
		})
		query.Fields = append(query.Fields, &schema.Field{
			Desc: schema.Description{Text: ""},
			Name: pluralName,
			Type: pluralOutputTypeName,
			Args: args,
		})

		mutationArgs := append(args, schema.InputValueList{
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "insert",
				Type: inputTypeName,
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "update",
				Type: inputTypeName,
			},
			&schema.InputValue{
				Desc: schema.Description{Text: ""},
				Name: "upsert",
				Type: inputTypeName,
			},
		}...)

		mutation.Fields = append(mutation.Fields, &schema.Field{
			Name: singularName,
			Args: mutationArgs,
			Type: outputType,
		})
		mutation.Fields = append(mutation.Fields, &schema.Field{
			Name: pluralName,
			Args: append(mutationArgs, schema.InputValueList{
				&schema.InputValue{
					Desc: schema.Description{Text: ""},
					Name: "inserts",
					Type: pluralInputTypeName,
				},
				&schema.InputValue{
					Desc: schema.Description{Text: ""},
					Name: "updates",
					Type: pluralInputTypeName,
				},
				&schema.InputValue{
					Desc: schema.Description{Text: ""},
					Name: "upserts",
					Type: pluralInputTypeName,
				},
			}...),
			Type: outputType,
		})
	}

	for typeName := range scalarExpressionTypesNeeded {
		expressionType := &schema.InputObject{
			Name: typeName + "Expression",
			Fields: schema.InputValueList{
				&schema.InputValue{
					Name: "eq",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "equals",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "neq",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "not_equals",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "gt",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "greater_than",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "lt",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "lesser_than",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "gte",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "greater_or_equals",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "lte",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "lesser_or_equals",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "in",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},
				&schema.InputValue{
					Name: "nin",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},
				&schema.InputValue{
					Name: "not_in",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},

				&schema.InputValue{
					Name: "like",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "nlike",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "not_like",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "ilike",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "nilike",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "not_ilike",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "similar",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "nsimilar",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "not_similar",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "has_key",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}},
				},
				&schema.InputValue{
					Name: "has_key_any",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},
				&schema.InputValue{
					Name: "has_key_all",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},
				&schema.InputValue{
					Name: "contains",
					Type: &schema.NonNull{OfType: &schema.List{OfType: &schema.NonNull{OfType: &schema.TypeName{Name: typeName}}}},
				},
				&schema.InputValue{
					Name: "contained_in",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "String"}},
				},
				&schema.InputValue{
					Name: "is_null",
					Type: &schema.NonNull{OfType: &schema.TypeName{Name: "Boolean"}},
				},
			},
		}
		engineSchema.Types[expressionType.Name] = expressionType
	}

	if err := engineSchema.ResolveTypes(); err != nil {
		return err
	}

	engine.Resolver = resolvers.Func(func(request *resolvers.ResolveRequest, next resolvers.Resolution) resolvers.Resolution {
		resolver := resolvers.MetadataResolver.Resolve(request, next)
		if resolver != nil {
			return resolver
		}
		resolver = resolvers.MethodResolver.Resolve(request, next) // needed by the MetadataResolver
		if resolver != nil {
			return resolver
		}

		return nil
	})

	sg.ge = engine
	return nil
}
169
core/prepare.go
@ -1,169 +0,0 @@
package core

import (
	"bytes"
	"database/sql"
	"fmt"
	"hash/maphash"
	"io"
	"strings"
	"sync"

	"github.com/dosco/super-graph/core/internal/allow"
	"github.com/dosco/super-graph/core/internal/qcode"
)

type query struct {
	sync.Once
	sd      *sql.Stmt
	ai      allow.Item
	qt      qcode.QType
	err     error
	st      stmt
	roleArg bool
}

func (sg *SuperGraph) prepare(q *query, role string) {
	var stmts []stmt
	var err error

	qb := []byte(q.ai.Query)
	vars := []byte(q.ai.Vars)

	switch q.qt {
	case qcode.QTQuery:
		if sg.abacEnabled {
			stmts, err = sg.buildMultiStmt(qb, vars)
		} else {
			stmts, err = sg.buildRoleStmt(qb, vars, role)
		}

	case qcode.QTMutation:
		stmts, err = sg.buildRoleStmt(qb, vars, role)
	}

	if err != nil {
		sg.log.Printf("WRN %s %s: %v", q.qt, q.ai.Name, err)
	}

	q.st = stmts[0]
	q.roleArg = len(stmts) > 1

	q.sd, err = sg.db.Prepare(q.st.sql)
	if err != nil {
		q.err = fmt.Errorf("prepare failed: %v: %s", err, q.st.sql)
	}
}

func (sg *SuperGraph) initPrepared() error {
	if sg.allowList.IsPersist() {
		return nil
	}

	if err := sg.prepareRoleStmt(); err != nil {
		return fmt.Errorf("role query: %w", err)
	}

	sg.queries = make(map[uint64]*query)

	list, err := sg.allowList.Load()
	if err != nil {
		return err
	}

	h := maphash.Hash{}
	h.SetSeed(sg.hashSeed)

	for _, v := range list {
		if v.Query == "" {
			continue
		}

		qt := qcode.GetQType(v.Query)

		switch qt {
		case qcode.QTQuery:
			sg.queries[queryID(&h, v.Name, "user")] = &query{ai: v, qt: qt}
			sg.queries[queryID(&h, v.Name, "anon")] = &query{ai: v, qt: qt}

		case qcode.QTMutation:
			for _, role := range sg.conf.Roles {
				sg.queries[queryID(&h, v.Name, role.Name)] = &query{ai: v, qt: qt}
			}
		}
	}

	return nil
}
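
// prepareRoleStmt builds the prepared statement that resolves the current
// user's role. The generated SQL has the shape:
//
//	SELECT (CASE WHEN EXISTS (<roles query>)
//	  THEN (SELECT (CASE WHEN <role match> THEN '<role name>' ... ELSE $2 END)
//	        FROM (<roles query>) AS "_sg_auth_roles_query" LIMIT 1)
//	  ELSE 'anon' END)
//	FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1;
//
// where $1 is the user id and $2 is the default role for matched users.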
// nolint: errcheck
func (sg *SuperGraph) prepareRoleStmt() error {
	var err error

	if !sg.abacEnabled {
		return nil
	}

	rq := strings.ReplaceAll(sg.conf.RolesQuery, "$user_id", "$1")
	w := &bytes.Buffer{}

	io.WriteString(w, `SELECT (CASE WHEN EXISTS (`)
	io.WriteString(w, rq)
	io.WriteString(w, `) THEN `)

	io.WriteString(w, `(SELECT (CASE`)
	for _, role := range sg.conf.Roles {
		if role.Match == "" {
			continue
		}
		io.WriteString(w, ` WHEN `)
		io.WriteString(w, role.Match)
		io.WriteString(w, ` THEN '`)
		io.WriteString(w, role.Name)
		io.WriteString(w, `'`)
	}

	io.WriteString(w, ` ELSE $2 END) FROM (`)
	io.WriteString(w, rq)
	io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
	io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1; `)

	sg.getRole, err = sg.db.Prepare(w.String())
	if err != nil {
		return err
	}

	return nil
}

func (sg *SuperGraph) initAllowList() error {
	var ac allow.Config
	var err error

	if sg.conf.AllowListFile == "" {
		sg.conf.AllowListFile = "allow.list"
	}

	// When the allow list is not enabled it is still created
	// and new queries are saved to it.
	if !sg.conf.UseAllowList {
		ac = allow.Config{CreateIfNotExists: true, Persist: true, Log: sg.log}
	}

	sg.allowList, err = allow.New(sg.conf.AllowListFile, ac)
	if err != nil {
		return fmt.Errorf("failed to initialize allow list: %w", err)
	}

	return nil
}

// nolint: errcheck
func queryID(h *maphash.Hash, name, role string) uint64 {
	h.WriteString(name)
	h.WriteString(role)
	v := h.Sum64()
	h.Reset()

	return v
}
252
core/remote.go
@ -1,252 +0,0 @@
package core

import (
	"bytes"
	"errors"
	"fmt"
	"hash/maphash"
	"net/http"
	"sync"

	"github.com/dosco/super-graph/core/internal/qcode"
	"github.com/dosco/super-graph/jsn"
)

func (sg *SuperGraph) execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
	var err error

	sel := st.qc.Selects
	h := maphash.Hash{}
	h.SetSeed(sg.hashSeed)

	// fetch the field names used within the db response json
	// that mark the insertion points, and the mapping between
	// those field names and their select objects
	fids, sfmap := sg.parentFieldIds(&h, sel, st.md.Skipped())

	// fetch the field values of the marked insertion points;
	// these values contain the id to be used when fetching remote data
	from := jsn.Get(data, fids)
	var to []jsn.Field

	switch {
	case len(from) == 1:
		to, err = sg.resolveRemote(hdr, &h, from[0], sel, sfmap)

	case len(from) > 1:
		to, err = sg.resolveRemotes(hdr, &h, from, sel, sfmap)

	default:
		return nil, errors.New("no remote ids found in db response")
	}

	if err != nil {
		return nil, err
	}

	var ob bytes.Buffer

	err = jsn.Replace(&ob, data, from, to)
	if err != nil {
		return nil, err
	}

	return ob.Bytes(), nil
}

func (sg *SuperGraph) resolveRemote(
	hdr http.Header,
	h *maphash.Hash,
	field jsn.Field,
	sel []qcode.Select,
	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {

	// replacement data for the marked insertion points;
	// key and value will be replaced by what's below
	toA := [1]jsn.Field{}
	to := toA[:1]

	// use the json key to find the related Select object
	_, _ = h.Write(field.Key)
	k1 := h.Sum64()

	s, ok := sfmap[k1]
	if !ok {
		return nil, nil
	}
	p := sel[s.ParentID]

	// then use the table name in the Select and its parent
	// to find the resolver to use for this relationship
	k2 := mkkey(h, s.Name, p.Name)

	r, ok := sg.rmap[k2]
	if !ok {
		return nil, nil
	}

	id := jsn.Value(field.Value)
	if len(id) == 0 {
		return nil, nil
	}

	//st := time.Now()

	b, err := r.Fn(hdr, id)
	if err != nil {
		return nil, err
	}

	if len(r.Path) != 0 {
		b = jsn.Strip(b, r.Path)
	}

	var ob bytes.Buffer

	if len(s.Cols) != 0 {
		err = jsn.Filter(&ob, b, colsToList(s.Cols))
		if err != nil {
			return nil, err
		}

	} else {
		ob.WriteString("null")
	}

	to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
	return to, nil
}

func (sg *SuperGraph) resolveRemotes(
	hdr http.Header,
	h *maphash.Hash,
	from []jsn.Field,
	sel []qcode.Select,
	sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {

	// replacement data for the marked insertion points;
	// key and value will be replaced by what's below
	to := make([]jsn.Field, len(from))

	var wg sync.WaitGroup
	wg.Add(len(from))

	var cerr error

	for i, id := range from {

		// use the json key to find the related Select object
		_, _ = h.Write(id.Key)
		k1 := h.Sum64()

		s, ok := sfmap[k1]
		if !ok {
			return nil, nil
		}
		p := sel[s.ParentID]

		// then use the table name in the Select and its parent
		// to find the resolver to use for this relationship
		k2 := mkkey(h, s.Name, p.Name)

		r, ok := sg.rmap[k2]
		if !ok {
			return nil, nil
		}

		id := jsn.Value(id.Value)
		if len(id) == 0 {
			return nil, nil
		}

		go func(n int, id []byte, s *qcode.Select) {
			defer wg.Done()

			//st := time.Now()

			b, err := r.Fn(hdr, id)
			if err != nil {
				cerr = fmt.Errorf("%s: %s", s.Name, err)
				return
			}

			if len(r.Path) != 0 {
				b = jsn.Strip(b, r.Path)
			}

			var ob bytes.Buffer

			if len(s.Cols) != 0 {
				err = jsn.Filter(&ob, b, colsToList(s.Cols))
				if err != nil {
					cerr = fmt.Errorf("%s: %s", s.Name, err)
					return
				}

			} else {
				ob.WriteString("null")
			}

			to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
		}(i, id, s)
	}
	wg.Wait()

	return to, cerr
}

func (sg *SuperGraph) parentFieldIds(h *maphash.Hash, sel []qcode.Select, skipped uint32) (
	[][]byte,
	map[uint64]*qcode.Select) {

	c := 0
	for i := range sel {
		s := &sel[i]
		if isSkipped(skipped, uint32(s.ID)) {
			c++
		}
	}

	// list of keys (and their related values) to extract from
	// the db json response
	fm := make([][]byte, c)

	// mapping between the above extracted keys and their Select
	// objects
	sm := make(map[uint64]*qcode.Select, c)
	n := 0

	for i := range sel {
		s := &sel[i]

		if !isSkipped(skipped, uint32(s.ID)) {
			continue
		}

		p := sel[s.ParentID]
		k := mkkey(h, s.Name, p.Name)

		if r, ok := sg.rmap[k]; ok {
			fm[n] = r.IDField
			n++

			_, _ = h.Write(r.IDField)
			sm[h.Sum64()] = s
		}
	}

	return fm, sm
}

func isSkipped(n, pos uint32) bool {
	return ((n & (1 << pos)) != 0)
}

func colsToList(cols []qcode.Column) []string {
	var f []string

	for i := range cols {
		f = append(f, cols[i].Name)
	}
	return f
}
@ -1,13 +0,0 @@
package core

import "hash/maphash"

// nolint: errcheck
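// mkkey hashes a pair of names (a table and its parent) into the uint64 key
// used to look up the remote resolver for that relationship; the hash is
// reset so the same maphash.Hash can be reused across calls.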
func mkkey(h *maphash.Hash, k1, k2 string) uint64 {
	h.WriteString(k1)
	h.WriteString(k2)
	v := h.Sum64()
	h.Reset()

	return v
}
12
demo
@ -1,13 +1,13 @@
#!/bin/bash
#!/bin/sh

if [ "$1" == "start" ]; then
  echo "Downloading pre-built docker images"
  docker-compose -f examples/rails-app/demo.yml pull
  docker-compose -f rails-app/demo.yml pull
  echo "Setting up and deploying Super Graph and the demo Rails app"
  docker-compose -f examples/rails-app/demo.yml run rails_app rake db:create db:migrate db:seed
  docker-compose -f examples/rails-app/demo.yml up
  docker-compose -f rails-app/demo.yml run rails_app rake db:create db:migrate db:seed
  docker-compose -f rails-app/demo.yml up
elif [ "$1" == "stop" ]; then
  docker-compose -f examples/rails-app/demo.yml down
  docker-compose -f rails-app/demo.yml down
else
  echo "./demo [start|stop]"
fi
fi
@ -1,10 +1,7 @@
version: '3.4'
services:
  db:
    image: postgres:12
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    image: postgres
    ports:
      - "5432:5432"

@ -14,16 +11,16 @@ services:
  # ports:
  # - "6379:6379"

  rails_app:
    build: examples/rails-app/.
    command: bash -c "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
    volumes:
      - ./examples/rails-app:/app
      - /app/tmp
    ports:
      - "3000:3000"
    depends_on:
      - db
  # rails_app:
  #   build: rails-app/.
  #   command: bash -c "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
  #   volumes:
  #     - ./rails-app:/app
  #     - /app/tmp
  #   ports:
  #     - "3000:3000"
  #   depends_on:
  #     - db

  super_graph:
    build:
@ -37,9 +34,9 @@ services:
    volumes:
      - .:/app
    working_dir: /app
    command: wtc
    command: wu -pattern="*.go" go run main.go serv
    depends_on:
      - db
      # - rails_app
      #- rails_app

      # - redis

0
docs/guide/.gitignore → docs/.gitignore
vendored
99
docs/.vuepress/components/HomeLayout.vue
Normal file
@ -0,0 +1,99 @@
<template>
  <div>
    <main aria-labelledby="main-title">
      <Navbar />

      <div class="container mx-auto">
        <div class="flex flex-col md:flex-row justify-between px-10 md:px-20">
          <div class="bg-bottom bg-no-repeat bg-cover">
            <div class="text-center md:text-left pt-24">
              <h1 v-if="data.heroText !== null" class="text-5xl font-bold text-black pb-0 uppercase">
                {{ data.heroText || $title || 'Hello' }}
              </h1>

              <p class="text-2xl text-gray-700 leading-tight pb-0">
                {{ data.tagline || $description || 'Welcome to your VuePress site' }}
              </p>

              <p class="text-lg text-gray-600 leading-tight">
                {{ data.longTagline }}
              </p>

              <NavLink
                class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
                :item="actionLink"
              />
            </div>
          </div>

          <div class="py-10 md:p-20">
            <img src="/hologram.svg" class="h-64">
          </div>

        </div>
      </div>

      <div>
        <div
          class="flex flex-wrap mx-2 md:mx-20"
          v-if="data.features && data.features.length"
        >
          <div
            class="w-2/4 md:w-1/3 shadow"
            v-for="(feature, index) in data.features"
            :key="index"
          >
            <div class="p-8">
              <h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
              <p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
            </div>
          </div>
        </div>
      </div>

      <div class="bg-gray-100 my-10">
        <div class="container mx-auto px-10 md:px-0 py-32">
          <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
            What is {{ data.heroText }}?
          </h1>
          <div class="text-2xl md:text-3xl">
            {{ data.description }}
          </div>
        </div>
      </div>

      <div class="theme-default-content markdown">
        <Content />
      </div>

      <div
        class="mx-auto text-center py-8"
        v-if="data.footer"
      >
        {{ data.footer }}
      </div>
    </main>
  </div>
</template>

<script>
import NavLink from '@theme/components/NavLink.vue'
import Navbar from '@theme/components/Navbar.vue'

export default {
  components: { NavLink, Navbar },

  computed: {
    data () {
      return this.$page.frontmatter
    },

    actionLink () {
      return {
        link: this.data.actionLink,
        text: this.data.actionText
      }
    }
  }
}
</script>
30
docs/.vuepress/config.js
Normal file
@ -0,0 +1,30 @@
module.exports = {
  title: 'Super Graph',
  description: 'Get an instant GraphQL API for your Rails apps.',

  themeConfig: {
    logo: '/logo.svg',
    nav: [
      { text: 'Docs', link: '/guide' },
      { text: 'Deploy', link: '/deploy' },
      { text: 'Github', link: 'https://github.com/dosco/super-graph' },
      { text: 'Docker', link: 'https://hub.docker.com/r/dosco/super-graph/builds' },
    ],
    serviceWorker: {
      updatePopup: true
    },
  },

  postcss: {
    plugins: [
      require('postcss-import'),
      require('tailwindcss'),
      require('postcss-nested'),
      require('autoprefixer')
    ]
  },

  plugins: [
    '@vuepress/plugin-nprogress',
  ]
}
BIN
docs/.vuepress/public/hero.png
Normal file
17
docs/.vuepress/styles/index.styl
Normal file
@ -0,0 +1,17 @@
@tailwind base;

@css {
  h1 {
    @apply font-semibold text-3xl border-0 py-4
  }
  h2, h3, h4 {
    @apply font-semibold text-2xl border-0 py-4
  }
  p {
    @apply py-2 text-lg
  }
}

@tailwind components;

@tailwind utilities;
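Tailwind's `@apply` works out of the box with the default theme, so the rules above need no extra setup. If you wanted to customize the utilities they reference, a config file can be generated with the standard Tailwind CLI; whether this repo already ships one is not shown on this page, so treat the step as optional:

```bash
# generate a default tailwind.config.js to customize the utilities used above
# (only needed if the project doesn't already include one -- an assumption)
npx tailwindcss init
```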
76
docs/README.md
Normal file
@ -0,0 +1,76 @@
---
layout: HomeLayout

home: true
heroText: "Super Graph"
heroImage: /super-graph-web-ui-half.png
heroImageMobile: /super-graph-web-ui.png
tagline: Build web products faster. Instant APIs for your apps
longTagline: Get an instant high-performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries.
actionText: Get Started, Free, Open Source →
actionLink: /guide

description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secure GraphQL API. It comes with tools to create a new app and manage its database. You get it all: a very productive developer experience and a highly scalable app backend. It's designed to work well on serverless platforms from Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.

features:
- title: Simple
  details: Easy config file, quick to deploy, no code needed. It just works.
- title: High Performance
  details: Compiles your GraphQL into a fast SQL query in realtime.
- title: Ruby-on-Rails
  details: Can read Rails cookies and supports Rails database conventions.
- title: Serverless
  details: Instant startup for scale-to-zero environments like Google Cloud Run, App Engine, and AWS Lambda.
- title: Go Lang
  details: Go is a language created at Google to build fast and secure web services.
- title: Free and Open Source
  details: Not a VC-funded startup. Not even a startup, just good old open source code.

footer: MIT Licensed | Copyright © 2018-present Vikram Rangnekar
---

## Try the demo

```bash
# download the Super Graph source
git clone https://github.com/dosco/super-graph.git

# set up the demo Rails app & database and run it
./demo start

# sign in to the demo app (user1@demo.com / 123456)
open http://localhost:3000

# try the Super Graph web UI
open http://localhost:8080
```
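The navbar configured earlier also links to prebuilt images on Docker Hub, so the demo script isn't the only way to kick the tires. A hypothetical sketch of running the published container; the port mapping matches the web UI URL above but is still an assumption, as nothing on this page documents the image's runtime behavior:

```bash
# pull and run the published image
# (the 8080 port mapping is an assumption based on the web UI URL above)
docker pull dosco/super-graph
docker run --rm -p 8080:8080 dosco/super-graph
```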

## Try a query

```graphql
query {
  users {
    id
    email
    picture: avatar
    products(limit: 2, where: { price: { gt: 10 } }) {
      id
      name
      description
    }
  }
}
```
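Since this is plain GraphQL over HTTP, the same query can be sent without the web UI. A minimal sketch using curl; the `/api/v1/graphql` path is an assumption here, so check the guide for the endpoint your build actually serves:

```bash
# POST the query above as JSON to the local instance
# (the endpoint path is an assumption; adjust to match your deployment)
curl -s http://localhost:8080/api/v1/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"{ users { id email picture: avatar products(limit: 2, where: { price: { gt: 10 } }) { id name description } } }"}'
```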

## Why Super Graph?

After working on several products through my career, I found building CRUD APIs (Create, Update, Delete, List, Show) to be a big part of the job. It was always the same thing: figure out what the UI needs, then build an endpoint for it; if related data is needed, join with another table. I didn't want to write that code anymore; I wanted the computer to just do it.

I always liked GraphQL; it sounded friendly. But it still required me to write all the same database query code. Sure, the API was nicer, but it took a lot of work, sometimes even more than a simple REST API would have. I wanted a GraphQL server that just worked the second you deployed it, without having to write a line of code.

And so, after a lot of coffee and some avocado toasts, __Super Graph was born: a GraphQL server that just works, is high performance, and is easy to deploy__. I hope you find it as useful as I do, and there's a lot more coming, so hit that :star: to stay in the loop.

## Say hello

[twitter.com/dosco](https://twitter.com/dosco)