Compare commits

...

61 Commits

SHA1 Message Date
62fd1eac55 Add named auth and the all new action endpoints 2020-02-03 01:21:07 -05:00
1a3d74e1ce Fix issues surfaced by the fuzzer 2020-02-02 01:43:09 -05:00
3a4d885987 Fix to ensure only named queries are saved to the allow list 2020-02-01 10:54:19 -05:00
3bd9b199dd Fix bug with connect / disconnect on array relationships 2020-01-31 00:19:38 -05:00
4ffa1483a4 Add ability to treat JSON/JSONB columns as tables 2020-01-28 00:26:53 -05:00
52f3b1c7a2 Add mutation support for connect / disconnect with array relationships 2020-01-26 01:10:54 -05:00
2d466bfb12 Add skip query selectors that require auth in anon role 2020-01-20 23:38:17 -05:00
a0b8907c3c Fix various json parsing and sql generation bugs 2020-01-19 03:12:51 -05:00
8097ca3b8f Fixes example steps (#33) 2020-01-18 16:44:16 -05:00
0e498b0e94 Fix order by with aliases bug 2020-01-17 09:35:14 -05:00
3eb5b83070 Fix invalid update sql bug 2020-01-17 00:48:17 -05:00
e3c94d17d1 Add corrupt query validation 2020-01-16 01:44:19 -05:00
7240b27214 Fix for table alias relationship bug 2020-01-15 23:26:06 -05:00
f37d867e32 Fix remnant debug messages 2020-01-14 23:28:48 -05:00
5e75cc7b83 Merge branch 'master' of github.com:dosco/super-graph 2020-01-14 23:19:11 -05:00
d4dca86267 Fix new app creation bug #32 2020-01-14 23:16:55 -05:00
76340ab008 Remove *pgxpool.Pool arg from NewDBSchema (#31) 2020-01-14 01:08:04 -05:00
3f5727c22b Fix variables with single quotes bug 2020-01-14 01:02:12 -05:00
7c02226016 Fix role filters and nested where bugs 2020-01-13 09:34:15 -05:00
1e31e33707 Fix for Makefile bug #30 2020-01-11 17:15:39 -05:00
0d0d63d8d1 Fix case-sensitivity bug with aliases 2020-01-08 00:48:04 -05:00
c40ff38b05 Fix case-sensitivity bug in GraphQL parser 2020-01-07 23:44:19 -05:00
7a5cf47486 Fix extra comma bug in mutations 2020-01-02 01:54:25 -05:00
5803395bd5 Fix duplicate columns in sql bug 2020-01-01 01:54:38 -05:00
a40bd7fca5 Add HTTP GZip compression 2019-12-31 01:30:20 -05:00
343589c3bd Fix bug with deep-nested queries 2019-12-30 01:17:37 -05:00
482203ba05 Add nested insert and update mutations with support for connect and disconnect 2019-12-29 01:53:54 -05:00
6831d3f56f Add nested mutations 2019-12-25 01:24:30 -05:00
96ed3413fc one-to-many-through: use correct fkey (#27) 2019-12-12 23:40:17 -05:00
bf4c496756 Fix for bug with foreign keys mapping 2019-12-12 00:47:56 -05:00
66055516d2 Fix documentation for DB relationships 2019-12-10 00:03:44 -05:00
2d3e3cbae1 #19 Health check (#24)
* Add health check endpoint (#19)

* Add healthy response (#19)
2019-12-09 01:59:30 -05:00
0e16eee93b Add config driven custom table relationships 2019-12-09 01:48:18 -05:00
679dd1fc83 Fix bug with remote join example 2019-12-02 23:08:57 -05:00
3a14a644ce Merge pull request #22 from bhaskarmurthy/fix-grammer-syntax
Fix grammer / syntax
2019-12-02 11:22:37 -05:00
5da79d91bf Add support for websearch_to_tsquery in PG 11 2019-12-02 10:53:18 -05:00
5aceb337d6 Fix grammer / syntax 2019-12-02 10:32:35 -05:00
5593c66996 Update issue templates 2019-12-01 01:25:39 -05:00
0f9f8bbf0d Update CONTRIBUTING.md 2019-12-01 01:17:27 -05:00
cbfedb6fd2 Update issue templates 2019-12-01 00:42:22 -05:00
37f2417c0b Update feature_request.md 2019-12-01 00:31:53 -05:00
768e8774e7 Create CODE_OF_CONDUCT.md 2019-12-01 00:13:04 -05:00
9140e597e1 Add a guide to the internals of the codebase 2019-11-30 10:45:24 -05:00
94593c9cce Add a CONTRIBUTING.md guide for contributors 2019-11-29 22:29:31 -05:00
a96c211fe5 Refactor rename 'Select.Table to Select.Name` 2019-11-29 01:38:23 -05:00
6d47f0df8e Fix for missing filters on nested selectors 2019-11-29 00:14:05 -05:00
e82bdbed65 Add a CHANGLOG.md 2019-11-29 00:14:05 -05:00
ef51b99fc7 Add issue templates 2019-11-28 16:47:56 -05:00
9ebd03fa8c Move license from MIT to Apache 2.0. Add Makefile 2019-11-28 01:25:46 -05:00
aff2a13ba4 Added support for query names to the allow.list 2019-11-26 01:36:19 -05:00
f518d5fc69 Fix bug with compiling anon queries 2019-11-25 02:22:33 -05:00
7583326d21 Move sql query logging from info to debug 2019-11-22 01:32:09 -05:00
8024a8420b Use logger error instead of panic in goja handlers 2019-11-22 00:58:03 -05:00
6bd27d7c79 Add a db:reset command only for dev mode 2019-11-22 00:07:06 -05:00
a4c09dedd5 Optimize db queries limit use of transactions 2019-11-21 02:14:12 -05:00
176514b5f1 Added support for multi-root queries 2019-11-19 00:47:55 -05:00
396f4bcfd8 Fix issues with JWT auth 2019-11-15 01:35:19 -05:00
51c5f037f4 Fix bug with migration filename generation 2019-11-10 01:41:58 -05:00
c576f1ae16 Fix bug with migration file name 2019-11-10 01:39:24 -05:00
0a6995eaca Fix bug with migration template name 2019-11-10 01:35:06 -05:00
06ed823a51 Fix bug with creating new migrations 2019-11-10 01:33:06 -05:00
104 changed files with 11968 additions and 6230 deletions

49
.chglog/CHANGELOG.tpl.md Executable file

@ -0,0 +1,49 @@
{{ if .Versions -}}
<a name="unreleased"></a>
## [Unreleased]
{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}
{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>
## {{ if .Tag.Previous }}[{{ .Tag.Name }}]{{ else }}{{ .Tag.Name }}{{ end }} - {{ datetime "2006-01-02" .Tag.Date }}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{- if .MergeCommits -}}
### Pull Requests
{{ range .MergeCommits -}}
- {{ .Header }}
{{ end }}
{{ end -}}
{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}
{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}

27
.chglog/config.yml Executable file

@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
title: CHANGELOG
repository_url: https://github.com/dosco/super-graph
options:
commits:
# filters:
# Type:
# - feat
# - fix
# - perf
# - refactor
commit_groups:
# title_maps:
# feat: Features
# fix: Bug Fixes
# perf: Performance Improvements
# refactor: Code Refactoring
header:
pattern: "^((\\w+)\\s.*)$"
pattern_maps:
- Subject
- Type
notes:
keywords:
- BREAKING CHANGE
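The `header` pattern splits each commit subject on its first word, which becomes the commit Type used for grouping. A small Go sketch (illustrative, not part of this change) shows the mapping that produces the group headings in the generated CHANGELOG.md later in this diff:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as in config.yml: group 1 maps to Subject,
// group 2 (the first word) maps to Type.
var header = regexp.MustCompile(`^((\w+)\s.*)$`)

func main() {
	m := header.FindStringSubmatch("Add nested mutations")
	fmt.Println("Subject:", m[1]) // "Add nested mutations"
	fmt.Println("Type:", m[2])    // "Add" — becomes a CHANGELOG group heading
}
```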

24
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@ -0,0 +1,24 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
<!-- If you suspect this could be a bug, follow the template. -->
### What version of Super Graph are you using? `super-graph version`
### Have you tried reproducing the issue with the latest release?
### What is the hardware spec (RAM, OS)?
### Steps to reproduce the issue (config used to run Super Graph).
### Expected behaviour and actual result.

12
.github/ISSUE_TEMPLATE/documentation.md vendored Normal file

@ -0,0 +1,12 @@
---
name: Documentation
about: Suggest how we can improve documentation
title: ''
labels: bug, docs
assignees: ''
---
<!-- If you think the Super Graph documentation (https://supergraph.dev/guide.html) falls short, please suggest ways we can improve it. -->
<!-- explain it here. -->

14
.github/ISSUE_TEMPLATE/feature_request.md vendored Normal file

@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
<!-- Please only use this template for submitting feature requests -->
**What would you like to be added**:
**Why is this needed**:

7
.gitignore vendored

@ -27,8 +27,13 @@
main
.DS_Store
.swp
.release
main
super-graph
supergraph
*-fuzz.zip
crashers
suppressions
suppressions
release
.gofuzz
*-fuzz.zip

424
CHANGELOG.md Normal file

@ -0,0 +1,424 @@
<a name="unreleased"></a>
## [Unreleased]
### Add
- Add config driven custom table relationships
- Add support for `websearch_to_tsquery` in PG 11
### Create
- Create CODE_OF_CONDUCT.md
### Fix
- Fix bug with remote join example
- Fix grammar / syntax
### Update
- Update issue templates
- Update CONTRIBUTING.md
- Update issue templates
- Update feature_request.md
<a name="v0.12.6"></a>
## [v0.12.6] - 2019-12-02
### Add
- Add support for `websearch_to_tsquery` in PG 11
<a name="v0.12.5"></a>
## [v0.12.5] - 2019-11-30
### Add
- Add a guide to the internals of the codebase
- Add a CONTRIBUTING.md guide for contributors
- Add a CHANGELOG.md
- Add issue templates
### Fix
- Fix for missing filters on nested selectors
### Refactor
- Refactor rename `Select.Table` to `Select.Name`
<a name="v0.12.4"></a>
## [v0.12.4] - 2019-11-28
### Move
- Move license from MIT to Apache 2.0. Add Makefile
<a name="v0.12.3"></a>
## [v0.12.3] - 2019-11-26
### Added
- Added support for query names to the allow.list
<a name="v0.12.2"></a>
## [v0.12.2] - 2019-11-25
### Fix
- Fix bug with compiling anon queries
<a name="v0.12.1"></a>
## [v0.12.1] - 2019-11-22
### Move
- Move sql query logging from info to debug
<a name="v0.12.0"></a>
## [v0.12.0] - 2019-11-22
### Use
- Use logger error instead of panic in goja handlers
<a name="v0.11.9"></a>
## [v0.11.9] - 2019-11-22
### Add
- Add a db:reset command only for dev mode
<a name="v0.11.8"></a>
## [v0.11.8] - 2019-11-21
### Optimize
- Optimize db queries limit use of transactions
<a name="v0.11.7"></a>
## [v0.11.7] - 2019-11-19
### Added
- Added support for multi-root queries
<a name="v0.11.6"></a>
## [v0.11.6] - 2019-11-15
### Fix
- Fix issues with JWT auth
- Fix bug with migration filename generation
- Fix bug with migration file name
<a name="v0.11.5"></a>
## [v0.11.5] - 2019-11-10
### Fix
- Fix bug with migration template name
<a name="v0.11.4"></a>
## [v0.11.4] - 2019-11-10
### Fix
- Fix bug with creating new migrations
<a name="v0.11.3"></a>
## [v0.11.3] - 2019-11-09
### Fix
- Fix macro syntax bug in app templates
<a name="v0.11.2"></a>
## [v0.11.2] - 2019-11-07
### Fix
- Fix bugs and add new production mode
<a name="v0.11.1"></a>
## [v0.11.1] - 2019-11-05
### Add
- Add nested where clause to filter based on related tables
### Block
- Block unauthorized requests when 'anon' role is not defined
### Update
- Update docs and website with new features
<a name="v0.11"></a>
## [v0.11] - 2019-11-01
### Add
- Add config driven presets for insert, update and upsert
- Add config driven presets for insert, update and upserta
- Add RBAC option to disable functions eg. count
- Add fuzz testing to 'serv' for the GQL hash parser
- Add fuzz testing to 'jsn' and 'qcode'
- Add ability to block queries and mutations by role
- Add built in 'anon' and 'user' roles
- Add role based access control
### Allow
- Allow config files to inherit from other config files
### Change
- Change config key inherit to inherits
### Get
- Get RBAC working for queries and mutations
### Optimize
- Optimize prepared statement flow for RBAC
### Preserve
- Preserve allow.list ordering on save
### Update
- Update filters section in guide
### Pull Requests
- Merge pull request [#11](https://github.com/dosco/super-graph/issues/11) from dosco/rbac
<a name="v0.10.1"></a>
## [v0.10.1] - 2019-10-06
### Add
- Add ability to set filters per operation / action
- Add upsert mutation
### Pull Requests
- Merge pull request [#10](https://github.com/dosco/super-graph/issues/10) from FourSigma/sm-examples-folder
<a name="v0.10"></a>
## [v0.10] - 2019-10-04
### Fix
- Fix return values for bulk mutations and delete
- Fix issues with mutation SQL
- Fix broken demo app
- Fix typo in 'across'
### Remove
- Remove extra link from README
### Update
- Update docs, getting started guide and mutations
### Pull Requests
- Merge pull request [#6](https://github.com/dosco/super-graph/issues/6) from muesli/typo-fixes
<a name="v0.9"></a>
## [v0.9] - 2019-10-01
### Fix
- Fix demo rails app broken build
<a name="v0.8"></a>
## [v0.8] - 2019-09-30
### Fix
- Fix invalid import bug
### Update
- Update documentation site
<a name="v0.7"></a>
## [v0.7] - 2019-09-29
### Failure
- Failure to prepare statements should be a warning
### Fix
- Fix duplicate column bug
<a name="v0.6"></a>
## [v0.6] - 2019-09-29
### Add
- Add database setup commands
- Add binary compression back to Dockerfile
- Add initialization command to setup new apps
- Add migrate command
- Add database seeding capability
- Add session variable for user id
- Add delete mutation
- Add update mutation
- Add insert mutation with bulk insert
- Add GoTO Aug, 19 presentation
- Add support for prepared statements
- Add end-to-end benchmarking
- Add object pooling for parser expressions
- Add request / response debugging for remote joins
- Add a presentation about GraphQL
- Add validation for remote JSON
- Add tracing for API stitching
- Add REST API stitching
- Add SQL query caching
- Add support for GraphQL variables
- Add fuzz testing to qcode
- Add test for Rails Redis cookie store integration
- Add an install guide
### Change
- Change fuzz test name to qcode
- Change logo from PNG to SVG
### Enable
- Enable reload on config change
### Fix
- Fix missing config name bug
- Fix new app templates
- Fix help message for migrate
- Fix session variable bug
- Fix test failures in `psql` and `serv`
- Fix demo docker services startup order
- Fix wrong value for false token bug. Reported by [@ThisIsMissEm](https://github.com/ThisIsMissEm)
- Fix allow.list file discovery bug
- Fix bug with allow list path
- Fix wrong value for use_allow_list in dev config
- Fix startup bug in demo script
- Fix url bug in allow list
- Fix bug [#676](https://github.com/dosco/super-graph/issues/676) found by fuzzer
- Fix race-condition in remote joins
- Fix cookie passing in web ui
- Fix bug with passing cookies in web ui
- Fix null pointer with invalid argument values
- Fix infinite loop bug in lexer
- Fix null pointer issue found by fuzz test
- Fix issue with fuzzbuzz config
- Fix demo to run as memory only
- Fix auth documentation
- Fix issue with web ui sizing
- Fix issue preventing docker-compose deploy
- Fix try demo documentation
### Further
- Further reduce allocations across hot paths
- Further reduce allocations on the compiler hot path
- Further optimize json parsing and editing performance
### Highlight
- Highlight top features better on the site
### Improve
- Improve readability of json parser code
- Improve the motivation section in the readme
- Improve the demo experience
### Make
- Make remote joins use parallel http requests
### Merge
- Merge branch 'master' into optimize-psql
### New
- New low allocation fast json parsing and editing library
### Optimize
- Optimize lexer and fix bugs
- Optimize the sql generator hot path
### Reduce
- Reduce allocations done by the stack
- Reduce steps to run the demo
- Reduce allocations and improve perf over 50%
### Remove
- Remove unused packages
- Remove the 'hello' test app folder
- Remove other allocations in psql
### Use
- Use hashes as ids for table relationships
### Watch
- Watch and reload on config changes
<a name="v0.5"></a>
## [v0.5] - 2019-04-10
### Add
- Add support for new Rails 5.2 aes-256-gcm cookies
- Add query support for ts_rank and ts_headline
- Add full text search support using TSV indexes
- Add missing assets folder
- Add fetch by ID feature
- Add documentation
### Cleanup
- Cleanup and redesign config files
### Fix
- Fix bug with auth config parsing
### Redesign
- Redesign config file architecture
### Reduce
- Reduce realloc of maps and slices
### Update
- Update docs with full-text search information
<a name="v0.4"></a>
## [v0.4] - 2019-04-01
<a name="v0.3"></a>
## [v0.3] - 2019-04-01
### Add
- Add SQL execution timing and tracing
- Add support for HAVING with aggregate queries
- Add aggregate functions to GQL queries
- Add Auth0 JWT support
- Add React UI building to the docker build flow
- Add compiler profiling
- Add benchmarks for GQL to SQL compile
- Add tests for gql to sql compile
### Cleanup
- Cleanup Dockerfile
### Fix
- Fix recurring packer issue docker hub builds
- Fix issue with asset packer breaking Docker builds
- Fix missing git package in Dockerfile
- Fix docker ignore values
- Fix image build failure on docker hub
- Fix build issue in Dockerfile
- Fix bugs and document the 'where' clause
- Fix perf issue with inflections
### Optimize
- Optimize docker image
### Pack
- Pack web UI with app into a single binary
### Upgrade
- Upgrade web UI packages
<a name="0.3"></a>
## 0.3 - 2019-03-24
### First
- First commit
### Fix
- Fix license to MIT
[Unreleased]: https://github.com/dosco/super-graph/compare/v0.12.6...HEAD
[v0.12.6]: https://github.com/dosco/super-graph/compare/v0.12.5...v0.12.6
[v0.12.5]: https://github.com/dosco/super-graph/compare/v0.12.4...v0.12.5
[v0.12.4]: https://github.com/dosco/super-graph/compare/v0.12.3...v0.12.4
[v0.12.3]: https://github.com/dosco/super-graph/compare/v0.12.2...v0.12.3
[v0.12.2]: https://github.com/dosco/super-graph/compare/v0.12.1...v0.12.2
[v0.12.1]: https://github.com/dosco/super-graph/compare/v0.12.0...v0.12.1
[v0.12.0]: https://github.com/dosco/super-graph/compare/v0.11.9...v0.12.0
[v0.11.9]: https://github.com/dosco/super-graph/compare/v0.11.8...v0.11.9
[v0.11.8]: https://github.com/dosco/super-graph/compare/v0.11.7...v0.11.8
[v0.11.7]: https://github.com/dosco/super-graph/compare/v0.11.6...v0.11.7
[v0.11.6]: https://github.com/dosco/super-graph/compare/v0.11.5...v0.11.6
[v0.11.5]: https://github.com/dosco/super-graph/compare/v0.11.4...v0.11.5
[v0.11.4]: https://github.com/dosco/super-graph/compare/v0.11.3...v0.11.4
[v0.11.3]: https://github.com/dosco/super-graph/compare/v0.11.2...v0.11.3
[v0.11.2]: https://github.com/dosco/super-graph/compare/v0.11.1...v0.11.2
[v0.11.1]: https://github.com/dosco/super-graph/compare/v0.11...v0.11.1
[v0.11]: https://github.com/dosco/super-graph/compare/v0.10.1...v0.11
[v0.10.1]: https://github.com/dosco/super-graph/compare/v0.10...v0.10.1
[v0.10]: https://github.com/dosco/super-graph/compare/v0.9...v0.10
[v0.9]: https://github.com/dosco/super-graph/compare/v0.8...v0.9
[v0.8]: https://github.com/dosco/super-graph/compare/v0.7...v0.8
[v0.7]: https://github.com/dosco/super-graph/compare/v0.6...v0.7
[v0.6]: https://github.com/dosco/super-graph/compare/v0.5...v0.6
[v0.5]: https://github.com/dosco/super-graph/compare/v0.4...v0.5
[v0.4]: https://github.com/dosco/super-graph/compare/v0.3...v0.4
[v0.3]: https://github.com/dosco/super-graph/compare/0.3...v0.3

3
CODE_OF_CONDUCT.md Normal file

@ -0,0 +1,3 @@
# Code of Conduct
Be excellent to each other. Treat others the way you'd like to be treated. We are all here to learn, build great software and make new friends.

82
CONTRIBUTING.md Normal file

@ -0,0 +1,82 @@
# Contributing to Super Graph
Super Graph is a very approachable codebase and a project that is easy for almost
anyone with basic Go knowledge to start contributing to. It is also a young project,
so a lot of high-value work is there for the taking.
Even the GraphQL to SQL compiler that is at the heart of Super Graph is essentially a textbook compiler with clean and easy-to-read code. The data structures used by the lexer, parser and SQL generator are easy to understand and modify.
Finally, we do have a lot of tests for critical parts of the codebase, which makes it easy for you to modify with confidence. I'm always available for questions or any sort of guidance, so feel free to reach out over Twitter or Discord.
* [Getting Started](#getting-started)
* [Setting Up the Development Environment](#setup-development-environment)
* [Prerequisites](#prerequisites)
* [Get the Super Graph source](#get-source-code)
* [Start the development environment](#start-the-development-environment)
* [Testing](#testing-and-linting)
* [Contributing](#contributing)
* [Guidelines](#guidelines)
* [Code style](#code-style)
## Getting Started
- Read the [Getting Started Guide](https://supergraph.dev/guide.html#get-started)
## Setup Development Environment
### Prerequisites
- Install [Git](https://git-scm.com/) (may be already installed on your system, or available through your OS package manager)
- Install [Go 1.13 or above](https://golang.org/doc/install)
- Install [Docker](https://docs.docker.com/v17.09/engine/installation/)
### Get source code
The entire build flow uses the `Makefile`; it has a whole list of sub-commands you
can use to build, test, install, lint, etc.
```bash
git clone https://github.com/dosco/super-graph
cd ./super-graph
make help
```
### Start the development environment
The entire development flow is packaged into a `docker-compose` workflow. The `up` command below will launch a Postgres database, an example e-commerce app in Rails, and Super Graph in development mode. The `db:seed` Rails task will insert sample data into Postgres.
```bash
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
docker-compose up
```
### Learn how the code works
[Super Graph codebase explained](https://supergraph.dev/internals.html)
### Testing and Linting
```bash
make lint test
```
## Contributing
### Guidelines
- **Pull requests are welcome**, as long as you're willing to put in the effort to meet the guidelines.
- Aim for clear, well written, maintainable code.
- Simple and minimal approach to features, like Go.
- Refactoring existing code now for better performance, better readability or better testability wins over adding a new feature.
- Don't add a function to a module that you don't use right now, or doesn't clearly enable a planned functionality.
- Don't ship a half-done feature, which would require significant alterations to work fully.
- Avoid [Technical debt](https://en.wikipedia.org/wiki/Technical_debt) like cancer.
- Leave the code cleaner than when you began.
### Code style
- We're following [Go Code Review](https://github.com/golang/go/wiki/CodeReviewComments).
- Use `go fmt` to format your code before committing.
- If you see *any code* which clearly violates the style guide, please fix it and send a pull request. No need to ask for permission.
- Avoid unnecessary vertical spaces. Use your judgment or follow the code review comments.
- Wrap your code and comments to 100 characters, unless doing so makes the code less legible.

Dockerfile

@ -6,13 +6,13 @@ RUN yarn
RUN yarn build
# stage: 2
FROM golang:1.13beta1-alpine as go-build
FROM golang:1.13.4-alpine as go-build
RUN apk update && \
apk add --no-cache make && \
apk add --no-cache git && \
apk add --no-cache upx=3.95-r2
RUN go get -u github.com/rafaelsq/wtc && \
go get -u github.com/GeertJohan/go.rice/rice
RUN GO111MODULE=off go get -u github.com/rafaelsq/wtc
WORKDIR /app
COPY . /app
@ -20,11 +20,9 @@ COPY . /app
RUN mkdir -p /app/web/build
COPY --from=react-build /web/build/ ./web/build/
ENV GO111MODULE=on
RUN go mod vendor
RUN go generate ./... && \
CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o super-graph && \
echo "Compressing binary, will take a bit of time..." && \
RUN make build
RUN echo "Compressing binary, will take a bit of time..." && \
upx --ultra-brute -qq super-graph && \
upx -t super-graph

189
LICENSE

@ -1,21 +1,176 @@
The MIT License (MIT)
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2019-present Vikram Rangnekar. twitter.com/dosco
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
1. Definitions.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

109
Makefile Normal file

@ -0,0 +1,109 @@
BUILD ?= $(shell git rev-parse --short HEAD)
BUILD_DATE ?= $(shell git log -1 --format=%ci)
BUILD_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
BUILD_VERSION ?= $(shell git describe --always --tags)
GOPATH ?= $(shell go env GOPATH)
ifndef GOPATH
override GOPATH = $(HOME)/go
endif
export GO111MODULE := on
# Build-time Go variables
version = github.com/dosco/super-graph/serv.version
gitBranch = github.com/dosco/super-graph/serv.gitBranch
lastCommitSHA = github.com/dosco/super-graph/serv.lastCommitSHA
lastCommitTime = github.com/dosco/super-graph/serv.lastCommitTime
BUILD_FLAGS ?= -ldflags '-s -w -X ${lastCommitSHA}=${BUILD} -X "${lastCommitTime}=${BUILD_DATE}" -X "${version}=${BUILD_VERSION}" -X ${gitBranch}=${BUILD_BRANCH}'
.PHONY: all build gen clean test run lint changelog release version help $(PLATFORMS)
test:
@go test -v ./...
BIN_DIR := $(GOPATH)/bin
GORICE := $(BIN_DIR)/rice
GOLANGCILINT := $(BIN_DIR)/golangci-lint
GITCHGLOG := $(BIN_DIR)/git-chglog
WEB_BUILD_DIR := ./web/build/manifest.json
$(GORICE):
@GO111MODULE=off go get -u github.com/GeertJohan/go.rice/rice
$(WEB_BUILD_DIR):
@echo "First install Yarn and create a build of the web UI found under ./web"
@echo "Command: cd web && yarn build"
@exit 1
$(GITCHGLOG):
@GO111MODULE=off go get -u github.com/git-chglog/git-chglog/cmd/git-chglog
changelog: $(GITCHGLOG)
@git-chglog $(ARGS)
$(GOLANGCILINT):
@GO111MODULE=off curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.21.0
lint: $(GOLANGCILINT)
@golangci-lint run ./... --skip-dirs-use-default
BINARY := super-graph
LDFLAGS := -s -w
PLATFORMS := windows linux darwin
os = $(word 1, $@)
$(PLATFORMS): lint test
@mkdir -p release
@GOOS=$(os) GOARCH=amd64 go build $(BUILD_FLAGS) -o release/$(BINARY)-$(BUILD_VERSION)-$(os)-amd64
release: windows linux darwin
all: lint test $(BINARY)
build: $(BINARY)
gen: $(GORICE) $(WEB_BUILD_DIR)
@go generate ./...
$(BINARY): clean
@go build $(BUILD_FLAGS) -o $(BINARY)
clean:
@rm -f $(BINARY)
run: clean
@go run $(BUILD_FLAGS) main.go $(ARGS)
install:
@echo $(GOPATH)
@echo "Commit Hash: `git rev-parse HEAD`"
@echo "Old Hash: `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`"
@go install $(BUILD_FLAGS)
@echo "New Hash:" `shasum $(GOPATH)/bin/$(BINARY) 2>/dev/null | cut -c -32`
uninstall: clean
@go clean -i -x
version:
@echo Super Graph ${BUILD_VERSION}
@echo Build: ${BUILD}
@echo Build date: ${BUILD_DATE}
@echo Branch: ${BUILD_BRANCH}
@echo Go version: $(shell go version)
help:
@echo
@echo Build commands:
@echo " make build - Build supergraph binary"
@echo " make install - Install supergraph binary"
@echo " make uninstall - Uninstall supergraph binary"
@echo " make [platform] - Build for platform [linux|darwin|windows]"
@echo " make release - Build all platforms"
@echo " make run - Run supergraph (eg. make run ARGS=\"help\")"
@echo " make test - Run all tests"
@echo " make changelog - Generate changelog (eg. make changelog ARGS=\"help\")"
@echo " make help - This help"
@echo
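For context, the `-X` flags in `BUILD_FLAGS` above assume matching string variables in the `serv` package, roughly like this sketch (illustrative, not the actual source file):

```go
package serv

// Populated at build time via -ldflags "-X pkg.var=value";
// left empty by a plain `go build` without BUILD_FLAGS.
var (
	version        string
	gitBranch      string
	lastCommitSHA  string
	lastCommitTime string
)
```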

13
NOTICE Normal file

@ -0,0 +1,13 @@
Copyright 2019 Vikram Rangnekar
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md

@ -1,16 +1,25 @@
<a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a>
<!-- <a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a> -->
# Super Graph - Instant GraphQL APIs for your apps.
<img src="docs/.vuepress/public/super-graph.png" width="250" />
## Build web products faster. No code needed. GraphQL auto. transformed into efficient database queries.
### Build web products faster. Secure high performance GraphQL
![MIT license](https://img.shields.io/github/license/dosco/super-graph.svg)
![Apache Public License 2.0](https://img.shields.io/github/license/dosco/super-graph.svg)
![Docker build](https://img.shields.io/docker/cloud/build/dosco/super-graph.svg)
![Cloud native](https://img.shields.io/badge/cloud--native-enabled-blue.svg)
[![Discord Chat](https://img.shields.io/discord/628796009539043348.svg)](https://discord.gg/6pSWCTZ)
## What is Super Graph
Super Graph is designed to 100x your developer productivity. Without you writing any code, it instantly provides a high-performance, secure GraphQL API for your Postgres database. GraphQL queries are translated into a single fast SQL query. No more writing API code: as you develop
your web frontend, just make the query you need and Super Graph will do the rest.
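For example, a query like the one below (an illustrative query against the demo's `products` table) is compiled into a single SQL statement:

```graphql
query {
  products(limit: 5, where: { price: { gt: 10 } }) {
    id
    name
    price
    user {
      full_name
    }
  }
}
```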
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, role and attribute based access control, support for JWT tokens, built-in DB mutations and seeding, and a lot more.
![GraphQL](docs/.vuepress/public/graphql.png?raw=true "")
## The story of Super Graph
After working on several products throughout my career, I find that we spend way too much time on building API backends. Most APIs also require constant updating, and this costs real time and money.
@ -25,15 +34,14 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
## Features
- Role based access control
- Works with Ruby-On-Rails databases
- Automatically learns database schemas and relationships
- Complex nested queries and mutations
- Auto learns database tables and relationships
- Role and Attribute based access control
- Full text search and aggregations
- Rails authentication supported (Redis, Memcache, Cookie)
- JWT tokens supported (Auth0, etc)
- Join database with remote REST APIs
- Highly optimized and fast Postgres SQL queries
- GraphQL queries and mutations
- Join database queries with remote REST APIs
- Also works with existing Ruby-On-Rails apps
- Rails authentication supported (Redis, Memcache, Cookie)
- A simple config file
- High performance GO codebase
- Tiny docker image and low memory requirements
@ -41,6 +49,16 @@ This compiler is what sits at the heart of Super Graph with layers of useful fun
- Database migrations tool
- Database seeding tool
## Get started
```
git clone https://github.com/dosco/super-graph
cd ./super-graph
make install
super-graph new <app_name>
```
## Documentation
[supergraph.dev](https://supergraph.dev)
@ -56,7 +74,7 @@ Twitter or Discord.
## License
[MIT](http://opensource.org/licenses/MIT)
[Apache Public License 2.0](https://opensource.org/licenses/Apache-2.0)
Copyright (c) 2019-present Vikram Rangnekar

337
allow/allow.go Normal file

@ -0,0 +1,337 @@
package allow
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type Item struct {
Name string
key string
URI string
Query string
Vars json.RawMessage
}
type List struct {
filepath string
saveChan chan Item
}
type Config struct {
CreateIfNotExists bool
Persist bool
}
func New(cpath string, conf Config) (*List, error) {
al := List{}
if len(cpath) != 0 {
fp := path.Join(cpath, "allow.list")
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
al.filepath = fp
} else if !os.IsNotExist(err) {
return nil, err
}
}
if len(al.filepath) == 0 {
if !conf.CreateIfNotExists {
return nil, errors.New("allow.list not found")
}
if len(cpath) == 0 {
al.filepath = "./config/allow.list"
} else {
al.filepath = path.Join(cpath, "allow.list")
}
}
if conf.Persist {
	al.saveChan = make(chan Item)

	// Persist queued items in the background; stop draining
	// the channel on the first write error.
	go func() {
		for v := range al.saveChan {
			if err := al.save(v); err != nil {
				break
			}
		}
	}()
}
return &al, nil
}
func (al *List) IsPersist() bool {
return al.saveChan != nil
}
func (al *List) Add(vars []byte, query, uri string) error {
if al.saveChan == nil {
return errors.New("allow.list is read-only")
}
if len(query) == 0 {
return errors.New("empty query")
}
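	// An operation starting with '{' is anonymous; prefix it with the
	// 'query' keyword so the stored entry is a complete, parsable operation.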
var q string
for i := 0; i < len(query); i++ {
c := query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
q = query
break
} else if c == '{' {
q = "query " + query
break
}
}
al.saveChan <- Item{
URI: uri,
Query: q,
Vars: vars,
}
return nil
}
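// Load parses the allow list file. Lines beginning with '#' record the
// source URI, 'variables { ... }' blocks capture the JSON variables, and
// each query or mutation block becomes an Item, de-duplicated by its
// lower-cased operation name.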
func (al *List) Load() ([]Item, error) {
var list []Item
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
return list, err
}
if len(b) == 0 {
return list, nil
}
var uri string
var varBytes []byte
itemMap := make(map[string]struct{})
s, e, c := 0, 0, 0
ty := 0
for {
fq := false
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
uri = strings.TrimSpace(string(b[(s + 1):e]))
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
fq = true
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
if fq {
query := string(b[s:(e + 1)])
name := QueryName(query)
key := strings.ToLower(name)
if _, ok := itemMap[key]; !ok {
v := Item{
Name: name,
key: key,
URI: uri,
Query: query,
Vars: varBytes,
}
list = append(list, v)
}
varBytes = nil
}
e++
if e >= len(b) {
break
}
}
return list, nil
}
func (al *List) save(item Item) error {
item.Name = QueryName(item.Query)
item.key = strings.ToLower(item.Name)
if len(item.Name) == 0 {
return nil
}
list, err := al.Load()
if err != nil {
return err
}
index := -1
for i, v := range list {
if strings.EqualFold(v.Name, item.Name) {
index = i
break
}
}
if index != -1 {
list[index] = item
} else {
list = append(list, item)
}
f, err := os.Create(al.filepath)
if err != nil {
return err
}
defer f.Close()
sort.Slice(list, func(i, j int) bool {
return strings.Compare(list[i].key, list[j].key) == -1
})
for _, v := range list {
_, err := f.WriteString(fmt.Sprintf("# %s\n\n", v.URI))
if err != nil {
return err
}
if len(v.Vars) != 0 && !bytes.Equal(v.Vars, []byte("{}")) {
vj, err := json.MarshalIndent(v.Vars, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal vars: %v", err)
}
_, err = f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
if err != nil {
return err
}
}
if v.Query[0] == '{' {
_, err = f.WriteString(fmt.Sprintf("query %s\n\n", v.Query))
} else {
_, err = f.WriteString(fmt.Sprintf("%s\n\n", v.Query))
}
if err != nil {
return err
}
}
return nil
}
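// matchPrefix reports whether the string s occurs in b starting at offset i.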
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}
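// QueryName extracts the operation name following a 'query' or 'mutation'
// keyword, returning "" for anonymous operations. State 0 scans for the end
// of the keyword, state 1 skips the spaces after it, state 2 reads the name.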
func QueryName(b string) string {
state, s := 0, 0
for i := 0; i < len(b); i++ {
switch {
case state == 2 && b[i] == '{':
return b[s:i]
case state == 2 && b[i] == ' ':
return b[s:i]
case state == 1 && b[i] == '{':
return ""
case state == 1 && b[i] != ' ':
s = i
state = 2
case state == 1 && b[i] == ' ':
continue
case i != 0 && b[i] == ' ' && (b[i-1] == 'n' || b[i-1] == 'y'):
state = 1
}
}
return ""
}
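A minimal sketch of how a caller might drive this package (hypothetical usage, assuming a `./config/allow.list` file already exists; note that saving happens asynchronously on a background goroutine):

```go
package main

import (
	"fmt"
	"log"

	"github.com/dosco/super-graph/allow"
)

func main() {
	// Open the existing allow list; Persist enables the background save loop.
	list, err := allow.New("./config", allow.Config{Persist: true})
	if err != nil {
		log.Fatal(err)
	}

	// Queue a named query for saving; unnamed queries are skipped by save().
	q := `query getProducts { products { id name } }`
	if err := list.Add(nil, q, "/api/v1/graphql"); err != nil {
		log.Fatal(err)
	}

	// Load returns the items currently on disk (the Add above may not be
	// flushed yet, since persistence is asynchronous).
	items, err := list.Load()
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range items {
		fmt.Println(it.Name, "->", it.URI)
	}
}
```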

82
allow/allow_test.go Normal file

@ -0,0 +1,82 @@
package allow
import (
"testing"
)
func TestGQLName1(t *testing.T) {
var q = `
query {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) { id name } }`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}
func TestGQLName2(t *testing.T) {
var q = `
query hakuna_matata {
products(
distinct: [price]
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }
) {
id
name
}
}`
name := QueryName(q)
if name != "hakuna_matata" {
t.Fatal("Name should be 'hakuna_matata', not ", name)
}
}
func TestGQLName3(t *testing.T) {
var q = `
mutation means{ users { id } }`
// var v2 = ` { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } `
name := QueryName(q)
if name != "means" {
t.Fatal("Name should be 'means', not ", name)
}
}
func TestGQLName4(t *testing.T) {
var q = `
query no_worries
users {
id
}
}`
name := QueryName(q)
if name != "no_worries" {
t.Fatal("Name should be 'no_worries', not ", name)
}
}
func TestGQLName5(t *testing.T) {
var q = `
{
users {
id
}
}`
name := QueryName(q)
if len(name) != 0 {
t.Fatal("Name should be empty, not ", name)
}
}

15
allow/fuzz_test.go Normal file

@ -0,0 +1,15 @@
package allow
import "testing"
func TestFuzzCrashers(t *testing.T) {
var crashers = []string{
"query",
"q",
"que",
}
for _, f := range crashers {
_ = QueryName(f)
}
}

config/allow.list

@ -73,43 +73,6 @@ mutation {
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
query {
products {
id
name
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
query {
products {
id
@ -133,21 +96,6 @@ mutation {
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
query {
products {
id
@ -174,39 +122,6 @@ mutation {
}
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
}
query {
products {
id
name
users {
email
}
}
}
query {
me {
id
email
full_name
}
}
variables {
"update": {
"name": "Helloo",
@ -224,67 +139,617 @@ mutation {
}
variables {
"data": [
{
"name": "Gumbo1",
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"created_at": "now",
"updated_at": "now"
}
]
"data": {
"name": "WOOO",
"price": 50.5
}
}
mutation {
products(insert: $data) {
id
name
}
}
query getProducts {
products {
id
name
price
description
}
}
query {
product {
deals {
id
name
price
}
}
variables {
"beer": "smoke"
}
query beerSearch {
products(search: $beer) {
id
name
search_rank
search_headline_description
}
}
query {
user {
id
full_name
}
}
variables {
"data": {
"email": "goo1@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "goo12@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": [
{
"name": "Banana 1",
"price": 1.1,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Banana 2",
"price": 2.2,
"created_at": "now",
"updated_at": "now"
}
]
}
}
mutation {
user(insert: $data) {
id
full_name
email
products {
id
name
price
}
}
}
variables {
"data": {
"name": "Banana 3",
"price": 1.1,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "a2@a.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
products(insert: $data) {
id
name
price
user {
id
full_name
email
}
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(id: 15, update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name",
"description": "my_desc"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
}
}
variables {
"update": {
"name": "my_name 2",
"description": "my_desc 2"
}
}
mutation {
product(update: $update, where: {id: {eq: 1}}) {
id
name
description
}
}
variables {
"data": {
"sale_type": "tuutuu",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude1@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
}
mutation {
purchase(update: $data, id: 5) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
mutation {
user(update: $data, where: {id: {eq: 8}}) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
query {
user(where: {id: {eq: 8}}) {
id
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "thedude@rug.com"
}
}
}
query {
user {
email
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
mutation {
product(update: $data, id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "booboo@demo.com"
}
}
}
query {
product(id: 6) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thedude123@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 7
},
"disconnect": {
"id": 8
}
}
}
}
mutation {
user(update: $data, id: 6) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"email": "thed44ude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": {
"id": 5
}
}
}
}
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Gumbo1",
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 6
}
}
},
{
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": {
"id": 3
}
}
}
]
}
mutation {
products(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": [
{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
},
{
"name": "Gumbo2",
"name": "Coconut",
"price": 2.25,
"created_at": "now",
"updated_at": "now"
}
]
}
query {
products {
mutation {
products(insert: $data) {
id
name
description
users {
user {
id
full_name
email
}
}
}
query {
users {
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5,
"email": "test@test.com"
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
email
picture: avatar
password
full_name
products(limit: 2, where: {price: {gt: 10}}) {
name
user {
id
name
description
price
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 9) {
id
name
user_id
}
}
variables {
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": {
"id": 5
}
}
}
}
mutation {
product(update: $data, id: 2) {
id
name
user_id
}
}

config/dev.yml

@ -5,11 +5,14 @@ web_ui: true
# debug, info, warn, error, fatal, panic
log_level: "debug"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
# the allow list in ./config/allow.list
production: true
production: false
# Throw a 401 on auth failure for queries that need auth
auth_fail_block: false
@ -97,23 +100,21 @@ database:
# Enable this if you need the user id in triggers, etc
set_user_id: false
# Define variables here that you want to use in filters
# sub-queries must be wrapped in ()
# database ping timeout is used for db health checking
ping_timeout: 1m
# Define additional variables here to be used with filters
variables:
account_id: "(select account_id from users where id = $user_id)"
admin_account_id: "5"
# Define defaults for the field keys and values below
defaults:
# filters: ["{ user_id: { eq: $user_id } }"]
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
tables:
- name: customers
@ -136,15 +137,23 @@ tables:
name: me
table: users
- name: deals
table: products
- name: users
columns:
- name: email
related_to: products.name
roles_query: "SELECT * FROM users WHERE id = $user_id"
roles:
- name: anon
tables:
- name: products
limit: 10
query:
limit: 10
columns: ["id", "name", "description" ]
aggregation: false
@ -157,6 +166,16 @@ roles:
delete:
block: false
- name: deals
query:
limit: 3
aggregation: false
- name: purchases
query:
limit: 3
aggregation: false
- name: user
tables:
- name: users
@ -167,15 +186,15 @@ roles:
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description" ]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description" ]
presets:
- user_id: "$user_id"
- created_at: "now"
- updated_at: "now"
update:
filters: ["{ user_id: { eq: $user_id } }"]
columns:
@ -188,8 +207,7 @@ roles:
block: true
- name: admin
match: id = 1
match: id = 1000
tables:
- name: users
# query:
# filters: ["{ account_id: { _eq: $account_id } }"]
filters: []

config/prod.yml

@ -9,6 +9,9 @@ web_ui: false
# debug, info, warn, error, fatal, panic, disable
log_level: "info"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
@ -41,40 +44,6 @@ enable_tracing: true
# SG_AUTH_RAILS_REDIS_PASSWORD
# SG_AUTH_JWT_PUBLIC_KEY_FILE
# inflections:
# person: people
# sheep: sheep
auth:
# Can be 'rails' or 'jwt'
type: rails
cookie: _app_session
rails:
# Rails version this is used for reading the
# various cookies formats.
version: 5.2
# Found in 'Rails.application.config.secret_key_base'
secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566
# Remote cookie store. (memcache or redis)
# url: redis://127.0.0.1:6379
# password: test
# max_idle: 80,
# max_active: 12000,
# In most cases you don't need these
# salt: "encrypted cookie"
# sign_salt: "signed encrypted cookie"
# auth_salt: "authenticated encrypted cookie"
# jwt:
# provider: auth0
# secret: abc335bfcfdb04e50db5bb0a4d67ab9
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
database:
type: postgres
host: db
@ -88,4 +57,7 @@ database:
# Set session variable "user.id" to the user id
# Enable this if you need the user id in triggers, etc
set_user_id: false
set_user_id: false
# database ping timeout is used for db health checking
ping_timeout: 5m

View File

@ -46,10 +46,10 @@ for (i = 0; i < product_count; i++) {
var data = {
name: fake.beer_name(),
description: desc,
price: fake.price(),
user_id: user.id,
created_at: "now",
updated_at: "now"
price: fake.price()
//user_id: user.id,
//created_at: "now",
//updated_at: "now"
}
var res = graphql(" \
@ -57,7 +57,9 @@ for (i = 0; i < product_count; i++) {
product(insert: $data) { \
id \
} \
}", { data: data })
}", { data: data }, {
user_id: 5
})
products.push(res.product)
}

View File

@ -37,6 +37,6 @@ services:
command: wtc
depends_on:
- db
- rails_app
# - rails_app
# - redis

View File

@ -8,15 +8,11 @@
<div class="bg-bottom bg-no-repeat bg-cover">
<div class="text-center md:text-left pt-24">
<h1 v-if="data.heroText !== null" class="text-5xl font-bold text-black pb-0 uppercase">
{{ data.heroText || $title || 'Hello' }}
<img src="/super-graph.png" width="250" />
</h1>
<p class="text-2xl text-gray-700 leading-tight pb-0">
{{ data.tagline || $description || 'Welcome to your VuePress site' }}
</p>
<p class="text-lg text-gray-600 leading-tight">
{{ data.longTagline }}
<p class="text-4xl text-gray-800 leading-tight mt-1">
Build web products faster. Secure high performance GraphQL
</p>
<NavLink
@ -141,7 +137,7 @@
</h1>
<div class="text-2xl md:text-3xl">
<small class="text-sm">Download the Docker compose config for the demo</small>
<pre>&#8227; curl -L -o demo.yml https://bit.ly/2mq05lW</pre>
<pre>&#8227; curl -L -o demo.yml https://bit.ly/2FZS0uw</pre>
<small class="text-sm">Setup the demo database</small>
<pre>&#8227; docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed</pre>

View File

@ -12,10 +12,10 @@ module.exports = {
nav: [
{ text: 'Docs', link: '/guide' },
{ text: 'Deploy', link: '/deploy' },
{ text: 'Internals', link: '/internals' },
{ text: 'Github', link: 'https://github.com/dosco/super-graph' },
{ text: 'Docker', link: 'https://hub.docker.com/r/dosco/super-graph/builds' },
{ text: 'Join Chat', link: 'https://discord.gg/NKdXBc' },
],
serviceWorker: {
updatePopup: true
@ -26,7 +26,7 @@ module.exports = {
['meta', { prefix: ogprefix, property: 'og:title', content: title }],
['meta', { prefix: ogprefix, property: 'twitter:title', content: title }],
['meta', { prefix: ogprefix, property: 'og:type', content: 'website' }],
['meta', { prefix: ogprefix, property: 'og:url', content: 'https://supergraph.dev }],
['meta', { prefix: ogprefix, property: 'og:url', content: 'https://supergraph.dev' }],
['meta', { prefix: ogprefix, property: 'og:description', content: description }],
//['meta', { prefix: ogprefix, property: 'og:image', content: 'https://wireupyourfrontend.com/assets/logo.png' }],
// ['meta', { name: 'apple-mobile-web-app-capable', content: 'yes' }],

Binary file not shown.


View File

@ -26,5 +26,5 @@ features:
- title: Free and Open Source
details: Not a VC funded startup. Not even a startup, just good old open source code
footer: MIT Licensed | Copyright © 2018-present Vikram Rangnekar
footer: Apache Public License 2.0 | Copyright © 2018-present Vikram Rangnekar
---

View File

@ -4,13 +4,15 @@ sidebar: auto
# Guide to Super Graph
Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries. Also designed to integrate with your Rails apps.
Super Graph is a service that instantly and without code gives you a high performance and secure GraphQL API. Your GraphQL queries are auto translated into a single fast SQL query. No more spending weeks or months writing backend API code. Just make the query you need and Super Graph will do the rest.
Super Graph has a rich feature set like integrating with your existing Ruby on Rails apps, joining your DB with data from remote APIs, Role and Attribute based access control, support for JWT tokens, DB migrations, seeding and a lot more.
## Features
- Role based access control
- Works with Ruby-On-Rails databases
- Role and Attribute based access control
- Works with existing Ruby-On-Rails apps
- Automatically learns database schemas and relationships
- Full text search and aggregations
- Rails authentication supported (Redis, Memcache, Cookie)
@ -25,17 +27,18 @@ Get an instant high performance GraphQL API for Postgres. No code needed. GraphQ
- Database migrations tool
- Database seeding tool
## Try the demo app
```bash
# download the Docker compose config for the demo
curl -L -o demo.yml https://bit.ly/2mq05lW
# clone the repository
git clone https://github.com/dosco/super-graph
# setup the demo rails app & database and run it
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed
docker-compose run rails_app rake db:create db:migrate db:seed
# run the demo
docker-compose -f demo.yml up
docker-compose up
# signin to the demo app (user1@demo.com / 123456)
open http://localhost:3000
@ -44,14 +47,14 @@ open http://localhost:3000
open http://localhost:8080
```
::: warning DEMO REQUIREMENTS
::: tip DEMO REQUIREMENTS
This demo requires `docker`. You can either install it using `brew` or from the
docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
:::
#### Trying out GraphQL
We currently fully support queries and mutations. Support for `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
We fully support queries and mutations. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.
#### GQL Query
@ -73,32 +76,6 @@ query {
}
```
In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations data has to always be passed as a variable.
```json
{
"data": {
"name": "Art of Computer Programming",
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
"price": 30.5
}
}
```
```graphql
mutation {
product(insert: $data) {
id
name
}
}
```
The above GraphQL query returns the JSON result below. It handles all
kinds of complexity without you having to write a line of code.
For example there is a where clause with a greater-than (`gt`) filter and a limit clause on a child field. And the `avatar` field is renamed to `picture`. The `password` field is blocked and not returned. Finally the relationship between the `users` table and the `products` table is auto discovered and used.
#### JSON Result
```json
@ -125,19 +102,107 @@ For example there is a while greater than `gt` and a limit clause on a child fie
}
```
#### Try with an authenticated user
::: tip Testing with a user
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI.
:::
In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI you'll see when you visit the above link. You can also directly run queries from the command line like below.
In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations data has to always be passed as a variable.
```json
{
"data": {
"name": "Art of Computer Programming",
"description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
"price": 30.5
}
}
```
```graphql
mutation {
product(insert: $data) {
id
name
}
}
```
#### Querying the GQL endpoint

```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
-H 'content-type: application/json' \
-H 'X-User-ID: 5' \
--data-binary '{"query":"{ products { name price users { email }}}"}'
```
## Why Super Graph
Let's take a simple example: say you want to fetch 5 products priced over 12 dollars along with the photos of the products and the users that own them. Additionally also fetch the last 10 of your own purchases along with the name and ID of the product you purchased. This is a common type of query to render a view in say an ecommerce app. Let's be honest, it's not very exciting to write and maintain. Keep in mind the data needed will only continue to grow and change as your app evolves. Developers might find that most ORMs will not be able to do all of this in a single SQL query and will require n+1 queries to fetch all the data and assemble it into the right JSON response.
What if I told you Super Graph will fetch all this data with a single SQL query and without you having to write a single line of code. Also as your app evolves feel free to evolve the query as you like. In our experience Super Graph saves us hundreds or thousands of man-hours that we can put towards the more exciting parts of our app.
#### GraphQL Query
```graphql
query {
products(limit: 5, where: { price: { gt: 12 } }) {
id
name
description
price
photos {
url
}
user {
id
email
picture : avatar
full_name
}
}
purchases(
limit: 10,
order_by: { created_at: desc } ,
where: { user_id: { eq: $user_id } }
) {
id
created_at
product {
id
name
}
}
}
```
#### JSON Result
```json
"data": {
"products": [
{
"id": 1,
"name": "Oaked Arrogant Bastard Ale",
"description": "Coors lite, European Amber Lager, Perle, 1272 - American Ale II, 38 IBU, 6.4%, 9.7°Blg",
"price": 20,
"photos: [{
"url": "https://www.scienceworld.ca/wp-content/uploads/science-world-beer-flavours.jpg"
}],
"user": {
"id": 1,
"email": "user0@demo.com",
"picture": "https://robohash.org/sitaliquamquaerat.png?size=300x300&set=set1",
"full_name": "Mrs. Wilhemina Hilpert"
}
},
...
]
},
"purchases": [
{
"id": 5,
"created_at": "2020-01-24T05:34:39.880599",
"product": {
"id": 45,
"name": "Brooklyn Black",
}
},
...
]
}
```
## Get Started
@ -147,9 +212,13 @@ Super Graph can generate your initial app for you. The generated app will have c
You can then add your database schema to the migrations, maybe create some seed data using the seed script and launch Super Graph. You're now good to go and can start working on your UI frontend in React, Vue or whatever.
```bash
# use the below command to download and install Super Graph. You will need Go 1.13 or above
GO111MODULE=on go get -u github.com/dosco/super-graph
# Download and install Super Graph. You will need Go 1.13 or above
git clone https://github.com/dosco/super-graph && cd super-graph && make install
```
And then create and launch your new app
```bash
# create a new app and change to its directory
super-graph new blog; cd blog
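# (a sketch) manage the database and start the service from inside the app
# directory; the command names below are assumed from the Super Graph CLI of
# this era — run `super-graph` with no arguments to see the actual list
super-graph db:migrate
super-graph db:seed
super-graph serv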
@ -276,7 +345,7 @@ transmission_gear_type
// Text
word
sentence
paragrph
paragraph
question
quote
@ -476,6 +545,21 @@ query {
}
```
Multiple tables can also be fetched using a single GraphQL query. This is very fast since the entire query is converted into a single SQL query which the database can efficiently run.
```graphql
query {
user {
full_name
email
}
products {
name
description
}
}
```
### Fetching data
To fetch a specific `product` by its ID you can use the `id` argument. The real id column will be resolved automatically so this query will work even if your id column is named something like `product_id`.
@ -498,7 +582,20 @@ query {
}
```
### Advanced queries
### Sorting
To sort or order results just use the `order_by` argument. This can be combined with `where`, `search`, etc to build complex queries to fit your needs.
```graphql
query {
products(order_by: { cached_votes_total: desc }) {
id
name
}
}
```
### Filtering
Super Graph supports complex queries where you can add filters, ordering, offsets and limits on the query. For example the below query will list all products where the price is greater than 10 and the id is not 5.
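A sketch of what that query could look like using the `where` argument; the `and` and `not` combinators shown here are assumptions extrapolated from the filter syntax used elsewhere in this guide:

```graphql
query {
  products(
    where: {
      and: { price: { gt: 10 }, not: { id: { eq: 5 } } }
    }) {
    id
    name
    price
  }
}
```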
@ -616,9 +713,7 @@ query {
}
```
## Mutations
In GraphQL, `mutation` is the operation type for when you need to modify data. Super Graph supports the `insert`, `update`, `upsert` and `delete` database operations. Here are some examples.
In GraphQL, `mutation` is the operation type for when you need to modify data. Super Graph supports the `insert`, `update`, `upsert` and `delete` database operations. You can also do complex nested inserts and updates.
When using mutations the data must be passed as variables since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called they accept arguments and your variables are passed in as those arguments.
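For instance, a minimal (non-nested) update following this pattern passes the changed fields as the `$data` variable; the field values below are just illustrative:

```json
{
  "data": {
    "name": "Art of Computer Programming",
    "price": 40.5
  }
}
```

```graphql
mutation {
  product(update: $data, id: 9) {
    id
    name
    price
  }
}
```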
@ -801,7 +896,135 @@ mutation {
}
```
### Using variables
Often you will need to create or update multiple related items at the same time. This can be done using nested mutations. For example you might need to create a product and assign it to a user, or create a user and his products at the same time. You just have to use simple json to define your mutation and Super Graph takes care of the rest.
### Nested Insert
Create a product item first and then assign it to a user
```json
{
"data": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": { "id": 5 }
}
}
}
```
```graphql
mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}
```
Or the reverse, create the user first and then his product
```json
{
"data": {
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}
}
```
```graphql
mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}
```
### Nested Update
Update a product item first and then assign it to a user
```json
{
"data": {
"name": "Apple",
"price": 1.25,
"user": {
"connect": { "id": 5 }
}
}
}
```
```graphql
mutation {
product(update: $data, id: 5) {
id
name
user {
id
full_name
email
}
}
}
```
Or the reverse, update a user first and then his product
```json
{
"data": {
"email": "newemail@me.com",
"full_name": "The Dude",
"product": {
"name": "Banana",
"price": 1.25,
}
}
}
```
```graphql
mutation {
user(update: $data, id: 1) {
id
full_name
email
product {
id
name
price
}
}
}
```
## Using Variables
Variables (`$product_id`) and their values (`"product_id": 5`) can be passed alongside the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The built-in web-ui also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner
and better client side code.
@ -823,7 +1046,105 @@ fetch('http://localhost:8080/api/v1/graphql', {
.then(res => console.log(res.data));
```
### Full text search
## GraphQL with React
This is a quick, simple example using `graphql.js` [https://github.com/f/graphql.js/](https://github.com/f/graphql.js/)
```js
import React, { useState, useEffect } from 'react'
import graphql from 'graphql.js'
// Create a GraphQL client pointing to Super Graph
var graph = graphql("http://localhost:3000/api/v1/graphql", { asJSON: true })
const App = () => {
const [user, setUser] = useState(null)
useEffect(() => {
async function action() {
// Use the GraphQL client to execute a graphQL query
// The second argument to the client is the variables you need to pass
const result = await graph(`{ user { id first_name last_name picture_url } }`)()
setUser(result)
}
action()
}, []);
return (
<div className="App">
<h1>{ JSON.stringify(user) }</h1>
</div>
);
}
export default App;
```
## Advanced Columns
The ability to have `JSON/JSONB` and `Array` columns is often considered among the most useful features of Postgres. There are many cases where using an array or a json column saves space and reduces complexity in your app. The only issue with these columns is really that your SQL queries can get harder to write and maintain.
Super Graph steps in here to help you by supporting these columns right out of the box. It allows you to work with these columns just like you would with tables. Joining data against or modifying array columns using the `connect` or `disconnect` keywords in mutations is fully supported. Another very useful feature is the ability to treat `json` or `binary json (jsonb)` columns as separate tables, even using them in nested queries joining against related tables. To replicate these features on your own will take a lot of complex SQL. Using Super Graph means you don't have to deal with any of this; it just works.
### Array Columns
Configure a relationship between an array column `tag_ids` which contains integer ids for tags and the column `id` in the table `tags`.
```yaml
tables:
- name: posts
columns:
- name: tag_ids
related_to: tags.id
```
```graphql
query {
posts {
title
tags {
name
image
}
}
}
```
### JSON Column
Configure a JSON column called `tag_count` in the table `products` into a separate table. This JSON column contains a json array of objects each with a tag id and a count of the number of times the tag was used. As a separate table you can nest it into your GraphQL query and treat it like a table, using any of the standard features like `order_by`, `limit`, `where` clauses, etc.
The configuration below tells Super Graph to create a synthetic table called `tag_count` using the column `tag_count` from the `products` table. And that this new table has two columns `tag_id` and `count` of the listed types and with the defined relationships.
```yaml
tables:
- name: tag_count
table: products
columns:
- name: tag_id
type: bigint
related_to: tags.id
- name: count
type: int
```
```graphql
query {
products {
name
tag_counts {
count
tag {
name
}
}
}
}
```
## Full text search
Every app these days needs search. This often means reaching for something heavy like Solr. While that will work, why add complexity to your infrastructure when Postgres has really great
and fast full text search built-in. And since it's part of Postgres it's also available in Super Graph.
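To give a sense of the API, a full text search could look something like the sketch below, using the `search` argument mentioned earlier in this guide (the search term is of course just an example):

```graphql
query {
  products(search: "ale") {
    id
    name
  }
}
```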
@ -908,11 +1229,45 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```
## API Security
One of the most common questions I get asked is what happens if a user out on the internet sends queries
that we don't want run. For example how do we stop them from fetching all users or the emails of users. Our answer to this is that it is not an issue as this cannot happen, let me explain.
Super Graph runs in one of two modes, `development` or `production`; this is controlled via the config value `production: false`. When it's false it's running in development mode and when true, production. In development mode all the **named** queries (including mutations) are saved to the allow list `./config/allow.list`. While in production mode when Super Graph starts only the queries from this allow list file are registered with the database as [prepared statements](https://stackoverflow.com/questions/8263371/how-can-prepared-statements-protect-from-sql-injection-attacks).
Prepared statements are designed by databases to be fast and secure. They protect against all kinds of sql injection attacks and since they are pre-processed and pre-planned they are much faster to run than raw sql queries. Also there's no GraphQL to SQL compiling happening in production mode which makes your queries lightning fast as they are directly sent to the database with almost no overhead.
In short in production only queries listed in the allow list file `./config/allow.list` can be used, all other queries will be blocked.
::: tip How to think about the allow list?
The allow list file is essentially a list of all your exposed API calls and the data that passes within them. It's very easy to build tooling to do things like parsing this file within your tests to ensure fields like `credit_card_no` are not accidentally leaked. It's a great way to build compliance tooling and ensure your user data is always safe.
:::
This is an example of a named query; `getUserWithProducts` is the name you've given to this query. It can be anything you like but should be unique across all your queries. Only named queries are saved in the allow list in development mode.
```graphql
query getUserWithProducts {
users {
id
name
products {
id
name
price
}
}
}
```
## Authentication
You can only have one type of auth enabled. You can either pick Rails or JWT.
You can only have one type of auth enabled, either Rails or JWT.
### Rails Auth (Devise / Warden)
### Ruby on Rails
Almost all Rails apps use Devise or Warden for authentication. Once the user is
authenticated a session is created with the user's ID. The session can either be
@ -964,7 +1319,7 @@ auth:
max_active: 12000
```
### JWT Token Auth
### JWT Tokens
```yaml
auth:
@ -978,19 +1333,78 @@ auth:
public_key_type: ecdsa #rsa
```
For JWT tokens we currently support tokens from a provider like Auth0
or if you have a custom solution then we look for the `user_id` in the
`subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token `user_id` and `user_id_provider` to use in your filters.
For JWT tokens we currently support tokens from a provider like Auth0 or if you have a custom solution then we look for the `user_id` in the `subject` claim of the `id token`. If you pick Auth0 then we derive two variables from the token `user_id` and `user_id_provider` to use in your filters.
We can get the JWT token either from the `authorization` header, where we expect it to be a `bearer` token, or if `cookie` is specified then we look there.
For validation a `secret` or a public key (ecdsa or rsa) is required. When using public keys they have to be in a PEM format file.
## Role based Access Control
### HTTP Headers
It's a common use case for APIs to control what information they return or insert based on the role of the user. For example when fetching a list of users, a normal user can only fetch his own entry while a manager can fetch all the users within a company and an admin user can fetch everyone. Or when creating a new user an admin user can set a user's role while the user himself cannot set or change it. This is called role based access control or RBAC.
```yaml
header:
name: X-AppEngine-QueueName
exists: true
#value: default
```
Super Graph allows you to set access control rules based on dynamically defined roles. You can create as many roles as you wish. The only two default (built-in) roles are `user` for authenticated requests and `anon` for unauthenticated. An authenticated request is one where Super Graph can extract a `user_id` based on the configured authentication method (jwt, rails cookies, etc).
Header auth is usually the best option to authenticate requests to the action endpoints. For example you
might want to use an action to refresh a materialized view every hour and only want a cron service like the Google AppEngine Cron service to make that request; in this case a config similar to the one above will do.
The `exists: true` parameter ensures that only the existence of the header is checked, not its value. The `value` parameter lets you confirm that the value matches the one assigned to the parameter. This helps in the case you are using a shared secret to protect the endpoint.
### Named Auth
```yaml
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header to all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
```
In addition to the default auth configuration you can create additional named auth configurations to be used
with features like `actions`. For example while your main GraphQL endpoint uses JWT for authentication you may want to use a header value to ensure your actions can only be called by clients having access to a shared secret
or security header.
## Actions
Actions is a very useful feature that is currently a work in progress. For now the best use case for actions is to
refresh database tables like materialized views or call a database procedure to refresh a cache table, etc. An action creates an http endpoint that anyone can call to have the SQL query executed. The below example will create an endpoint `/api/v1/actions/refresh_leaderboard_users`; any request sent to that endpoint will cause the sql query to be executed. The `auth_name` points to a named auth that should be used to secure this endpoint. In future we have big plans to allow your own custom code to run using actions.
```yaml
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
```
#### Using CURL to test a query
```bash
# fetch the response json directly from the endpoint using user id 5
curl 'http://localhost:8080/api/v1/graphql' \
-H 'content-type: application/json' \
-H 'X-User-ID: 5' \
--data-binary '{"query":"{ products { name price users { email }}}"}'
```
## Access Control
It's common for APIs to control what information they return or insert based on the role of the user. In Super Graph we have two primary roles, `user` and `anon`: the first for requests where a `user_id` is available, the latter for those where it's not.
::: tip
An authenticated request is one where Super Graph can extract a `user_id` based on the configured authentication method (jwt, rails cookies, etc).
:::
The `user` role can be divided up into further roles based on attributes in the database. For example when fetching a list of users, a normal user can only fetch his own entry while a manager can fetch all the users within a company and an admin user can fetch everyone. In some places this is called attribute based access control. So in a way we support both: role based access control and attribute based access control.
Super Graph allows you to create roles dynamically using the `roles_query` and `match` config values.
### Configure RBAC
@ -1029,30 +1443,10 @@ roles:
filters: []
```
This configuration is relatively simple to follow. The `roles_query` parameter is the query that
must be run to help figure out a user's role. This query can be as complex as you like and include joins with other tables.
This configuration is relatively simple to follow. The `roles_query` parameter is the query that must be run to help figure out a user's role. This query can be as complex as you like and include joins with other tables.
The individual roles are defined under the `roles` parameter and this includes each table the role has a custom setting for. The role is dynamically matched using the `match` parameter; for example in the above case `users.id = 1` means that when the `roles_query` is executed a user with the id `1` will be assigned the admin role and those that don't match get the `user` role if authenticated successfully or the `anon` role.
The below example would work for SAAS apps where an account (tenant) is usually the top parent table to everything else.
```yaml
roles_query: "SELECT * FROM users JOIN accounts on accounts.id = users.account_id WHERE users.id = $user_id"
roles:
- name: user
tables:
- name: users
...
- name: admin
match: accounts.admin_id = $user_id
tables:
- name: users
query:
filters: [{ accounts: { id: { eq: $account_id } } }]
```
## Remote Joins
It often happens that after fetching some data from the DB we need to call another API to fetch some more data and have all this combined into a single JSON response. For example along with a list of users you need their last 5 payments from Stripe. This requires you to query your DB for the users and Stripe for the payments. Super Graph handles all this for you; also, only the fields you requested from the Stripe API are returned.
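A remote join is configured on the table in question. A minimal sketch, assuming the `remotes` config shape used in the project's example configs (treat the exact keys as an approximation):

```yaml
tables:
  - name: customers
    remotes:
      - name: payments
        id: stripe_id
        # $id is replaced with the value of the id column above
        url: http://rails_app:3000/stripe/$id
        path: data
        # forward these request headers to the remote API
        pass_headers:
          - cookie
```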
@ -1130,6 +1524,23 @@ Even tracing data is availble in the Super Graph web UI if tracing is enabled in
![Query Tracing](/tracing.png "Super Graph Web UI Query Tracing")
## Database Relationships
In most cases you don't need this configuration; Super Graph will discover and learn
the relationship graph within your database automatically. It does this using `Foreign Key` relationships that you have defined in your database schema.
The below configs are only needed in special cases such as when you don't use foreign keys or when you want to create a relationship between two tables where a foreign key is not defined or cannot be defined.
For example in the sample below a relationship is defined between the `tags` column on the `posts` table and the `slug` column on the `tags` table. This cannot be defined using foreign keys since the `tags` column is of type array `text[]` and Postgres for one does not allow foreign keys with array columns.
```yaml
tables:
- name: posts
columns:
- name: tags
related_to: tags.slug
```
## Configuration files
@ -1149,6 +1560,9 @@ web_ui: true
# debug, info, warn, error, fatal, panic
log_level: "debug"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
# When it's 'false' all queries are saved to the
@ -1224,6 +1638,22 @@ auth:
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
# header:
# name: dnt
# exists: true
# value: localhost:8080
# You can add additional named auths to use with actions
# In this example actions using this auth can only be
# called from the Google Appengine Cron service that
# sets a special header to all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
database:
type: postgres
host: db
@ -1241,23 +1671,29 @@ database:
# Enable this if you need the user id in triggers, etc
set_user_id: false
# Define variables here that you want to use in filters
# sub-queries must be wrapped in ()
# Define additional variables here to be used with filters
variables:
account_id: "(select account_id from users where id = $user_id)"
admin_account_id: "5"
# Define defaults for the field key and values below
defaults:
# filters: ["{ user_id: { eq: $user_id } }"]
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Create custom actions with their own api endpoints
# For example the below action will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this url will execute the configured SQL query
# which in this case refreshes a materialized view in the database.
# The auth_name is from one of the configured auths
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
tables:
- name: customers
@ -1329,14 +1765,13 @@ roles:
- updated_at: "now"
delete:
deny: true
block: true
- name: admin
match: id = 1
match: id = 1000
tables:
- name: users
# query:
# filters: ["{ account_id: { _eq: $account_id } }"]
filters: []
```
@ -1372,9 +1807,6 @@ brew install yarn
# yarn install dependencies and build the web ui
(cd web && yarn install && yarn build)
# generate some stuff the go code needs
go generate ./...
# do this the only the time to setup the database
docker-compose run rails_app rake db:create db:migrate db:seed
@ -1383,6 +1815,10 @@ docker-compose up
```
## MIT License
## Learn how the code works
MIT Licensed | Copyright © 2018-present Vikram Rangnekar
[Super Graph codebase explained](https://supergraph.dev/internals.html)
## Apache License 2.0
Apache Public License 2.0 | Copyright © 2018-present Vikram Rangnekar

docs/internals.md Normal file
View File

@ -0,0 +1,241 @@
---
sidebar: auto
---
# Super Graph Codebase Explained
Super Graph code is made up of a number of packages. We have done our best to keep each package small and focused. Let us begin by looking at some of these packages.
1. qcode - GraphQL lexer and parser.
2. psql - SQL generator
3. serv - HTTP Endpoint, Configs, CLI, etc
4. rails - Rails cookie and session store decoders
## QCODE
This package contains the core of the GraphQL compiler; it handles the lexing and parsing of the GraphQL query, transforming it into an internal representation called
`QCode`.
This is the first step of the compiling process. The `func NewCompiler(c Config)` function creates a new instance of this compiler which has its own config.
Keep in mind QCode has no knowledge of the database structure; it is designed to be a fast GraphQL parser. Care is taken to keep memory allocations to a minimum.
```go
const (
opQuery
opMutate
...
)
type QCode struct {
Type QType
Selects []Select
...
}
type Select struct {
ID int32
ParentID int32
Args map[string]*Node
Name string
FieldName string
Cols []Column
Where *Exp
OrderBy []*OrderBy
DistinctOn []string
Paging Paging
Children []int32
Functions bool
Allowed map[string]struct{}
PresetMap map[string]string
PresetList []string
}
```
But before the incoming GraphQL query can be turned into QCode it must first be tokenized by the lexer `lex.go`. As the tokenizer walks the bytes of the query it generates tokens, `item` structs, which are then consumed by the next step, the parser `parse.go`.
```go
type item struct {
typ itemType
pos Pos
end Pos
}
```
For example a simple query like `query getUser { user { id } }` will be converted into several tokens like below.
```go
item{itemQuery, 0, 4} // query
item{itemName, 6, 12} // getUser
item{itemObjOpen, 16, 20} // {
...
```
These tokens are then fed into the parser `parse.go`, which does the work of generating an abstract syntax tree (AST) from the tokens. This AST is an internal representation (data structure) and is not exposed outside the package. Since the AST is a tree, a stack `stack.go` is used to walk the tree and generate the QCode AST. The QCode data structure is also a tree (represented as an array). This is then returned to the caller of the compile function.
```go
type Operation struct {
Type parserType
Name string
Args []Arg
Fields []Field
}
type Field struct {
ID int32
ParentID int32
Name string
Alias string
Args []Arg
Children []int32
}
```
## PSQL
This package is responsible for generating Postgres SQL from the QCode AST. There are various GraphQL query types (Query, Mutation, etc) and several more sub-types like single root or multi-root queries, and various types of mutations (insert, update, delete, bulk insert, etc). This package is designed to be able to generate SQL for all of those types.
In addition to QCode, variable data is also passed to the compile function within this package. Variables are decoded to derive what is being inserted and what kind of insert it is, single or bulk. This information is not available in the GraphQL query; it's passed in separately via variables. This package is able to put all this together and generate the right SQL code.
The entry point of this package is in `query.go`. The database schema must be passed in the config object when creating a new compiler instance `NewCompiler`. The functions to extract this schema from the database are also part of this package, in `tables.go`. The `GetTables` function fetches all the tables from the database and `GetColumns` fetches columns and relationship information.
```go
func NewCompiler(conf Config) *Compiler {
return &Compiler{conf.Schema, conf.Vars}
}
func (co *Compiler) Compile(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
switch qc.Type {
case qcode.QTQuery:
return co.compileQuery(qc, w)
case qcode.QTInsert, qcode.QTUpdate, qcode.QTDelete, qcode.QTUpsert:
return co.compileMutation(qc, w, vars)
}
return 0, fmt.Errorf("Unknown operation type %d", qc.Type)
}
```
The GraphQL input is first converted to QCode.
```graphql
query {
user {
id
}
posts {
title
}
}
```
And then into SQL. In reality the generated SQL is far more complex since it has to be very efficient, leverage the power of Postgres, support RBAC (Role based access control), and all of this must be done in a single SQL query.
```sql
SELECT users.id, posts.title FROM users, posts;
```
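Putting the two packages together, the overall flow looks roughly like this. This is a hedged sketch: the `psql` signatures are taken from the snippet above, while the `qcode.NewCompiler` return values and the `Compile(query, role)` signature are my assumptions about this version of the code.

```go
package main

import (
	"bytes"
	"log"

	"github.com/dosco/super-graph/psql"
	"github.com/dosco/super-graph/qcode"
)

func main() {
	// GraphQL -> QCode (constructor return values assumed)
	qcompile, err := qcode.NewCompiler(qcode.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// compile the query for a given role (signature assumed)
	qc, err := qcompile.Compile([]byte(`query { user { id } }`), "user")
	if err != nil {
		log.Fatal(err)
	}

	// QCode -> SQL; a real setup would first load the schema
	// via GetTables/GetColumns and pass it in psql.Config
	pcompile := psql.NewCompiler(psql.Config{})
	w := &bytes.Buffer{}
	if _, err := pcompile.Compile(qc, w, nil); err != nil {
		log.Fatal(err)
	}
	log.Println(w.String())
}
```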
## SERV
The `serv` package contains most of the code that turns the above compiler into an HTTP service. It also includes authentication middleware, remote join resolvers, config parsing, database migrations and seeding commands.
Another big feature that this package handles is the `allow.list` management code: in production mode parsing the allow list file and registering prepared statements, and in development mode adding GraphQL queries to this file.
Currently the following global variables are referenced across the package. In the future I'd prefer to move these into a context struct and pass that around instead.
```go
var (
logger zerolog.Logger // logger for everything but errors
errlog zerolog.Logger // logger for errors includes line numbers
conf *config // parsed config
confPath string // path to the config file
db *pgxpool.Pool // database connection pool
schema *psql.DBSchema // database tables, columns and relationships
qcompile *qcode.Compiler // qcode compiler
pcompile *psql.Compiler // postgres sql compiler
)
```
## Testing
There are several unit tests and benchmark tests (e.g. `parse_test.go`) included. There are also scripts included for memory (`pprof_mem.sh`) and cpu (`pprof_cpu.sh`) profiling.
```go
// Test to ensure synthetic tables generate the correct SQL
func TestSyntheticTables(t *testing.T) {
gql := `query {
me {
email
}
}`
sql := `SELECT json_object_agg('me', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT ) AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
```
You can run tests within each package or across the entire app. It is usually the fastest to first write a test and then build the feature to satisfy it.
```bash
go test -v ./...
```
Memory profiling can help find where allocations are happening within the package code.
```bash
$ cd ./psql
$ ./pprof_mem.sh
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8 52567 19401 ns/op 3918 B/op 61 allocs/op
BenchmarkCompileParallel-8 219548 5684 ns/op 3938 B/op 61 allocs/op
PASS
ok github.com/dosco/super-graph/psql 2.582s
Type: alloc_space
Time: Nov 29, 2019 at 11:59pm (EST)
Entering interactive mode (type "help" for commands, "o" for options)
(pprof) top
Showing nodes accounting for 880.59MB, 80.63% of 1092.14MB total
Dropped 33 nodes (cum <= 5.46MB)
Showing top 10 nodes out of 35
flat flat% sum% cum cum%
22MB 2.01% 2.01% 903.57MB 82.73% github.com/dosco/super-graph/qcode.(*Compiler).Compile
0 0% 2.01% 862.98MB 79.02% github.com/dosco/super-graph/psql.BenchmarkCompileParallel.func1
0 0% 2.01% 862.98MB 79.02% testing.(*B).RunParallel.func1
461.95MB 42.30% 44.31% 760.53MB 69.64% github.com/dosco/super-graph/qcode.(*Compiler).compileQuery
396.63MB 36.32% 80.63% 396.63MB 36.32% github.com/dosco/super-graph/util.NewStack
0 0% 80.63% 252.07MB 23.08% github.com/dosco/super-graph/qcode.(*Compiler).compileArgs
0 0% 80.63% 228.15MB 20.89% testing.(*B).runN
0 0% 80.63% 227.63MB 20.84% github.com/dosco/super-graph/psql.BenchmarkCompile
0 0% 80.63% 227.63MB 20.84% testing.(*B).launch
0 0% 80.63% 187.04MB 17.13% github.com/dosco/super-graph/psql.(*Compiler).Compile
```
## Benchmarking
Most packages contain benchmark tests to ensure new features don't introduce a significant regression to performance.
```bash
$ cd ./psql
$ go test -v -run=xx -bench=.
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8 60775 19076 ns/op 3919 B/op 61 allocs/op
BenchmarkCompileParallel-8 207847 5172 ns/op 3937 B/op 61 allocs/op
PASS
ok github.com/dosco/super-graph/psql 2.530s
```
## Reach out
If you'd like me to explain other parts of the code please reach out over Twitter or Discord. I'll keep adding to this doc as I get time.

View File

@ -63,4 +63,6 @@ Rails.application.configure do
config.web_console.whitelisted_ips = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']
config.hosts << "rails_app"
end

go.mod
View File

@ -3,7 +3,7 @@ module github.com/dosco/super-graph
require (
github.com/GeertJohan/go.rice v1.0.0
github.com/Masterminds/semver v1.5.0
github.com/OneOfOne/xxhash v1.2.5 // indirect
github.com/NYTimes/gziphandler v1.1.1
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
github.com/brianvoe/gofakeit v3.18.0+incompatible
@ -12,32 +12,26 @@ require (
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/dlclark/regexp2 v1.2.0 // indirect
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681 // indirect
github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c // indirect
github.com/fsnotify/fsnotify v1.4.7
github.com/garyburd/redigo v1.6.0
github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
github.com/gobuffalo/flect v0.1.6
github.com/gorilla/websocket v1.4.1
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgconn v1.0.1
github.com/jackc/pgtype v1.0.1
github.com/jackc/pgx v3.6.0+incompatible
github.com/jackc/pgx/v4 v4.0.1
github.com/jackc/tern v1.8.2
github.com/magiconair/properties v1.8.1 // indirect
github.com/pelletier/go-toml v1.4.0 // indirect
github.com/pkg/errors v0.8.1
github.com/rs/zerolog v1.15.0
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0
github.com/valyala/fasttemplate v1.0.1
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
golang.org/x/sys v0.0.0-20190927073244-c990c680b611 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 // indirect
gopkg.in/yaml.v2 v2.2.7 // indirect
)
go 1.13

go.sum
View File

@ -1,34 +1,30 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3 h1:+qz9Ga6l6lKw6fgvk5RMV5HQznSLvI8Zxajwdj4FhFg=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3/go.mod h1:FlkD11RtgMTYjVuBnb7cxoHmQGqvPpCsr2atC88nl/M=
github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8=
github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.0 h1:Eb1IiuHmi3FhT12NKfqCQXSXRqc4NTMvgJoREemrSt4=
github.com/cespare/xxhash/v2 v2.0.0/go.mod h1:MaMeaVDXZNmTpkOyhVs3/WfjgobkbQgfrVnrr3DyZL0=
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@ -41,6 +37,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=
@ -57,8 +54,8 @@ github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733 h1:cyNc40Dx5YNEO94idePU8rhVd3dn+sd04Arh0kDBAaw=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681 h1:3WV5aRRj1ELP3RcLlBp/v0WJTuy47OQMkL9GIQq8QEE=
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c h1:/bXaeEuNG6V0HeyEGw11DYLW5BGsOPlcVRIXbHNUWSo=
github.com/dvyukov/go-fuzz v0.0.0-20191206100749-a378175e205c/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
@ -70,8 +67,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.1.1 h1:GTZJjJufv9FxgRs1+0Soo3wj+Md3kTUmTER/YE4uINA=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/gobuffalo/flect v0.1.6 h1:D7KWNRFiCknJKA495/e1BO7oxqf8tbieaLv/ehoZ/+g=
github.com/gobuffalo/flect v0.1.6/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
@ -87,8 +82,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@ -100,8 +93,6 @@ github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZb
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
@ -126,9 +117,6 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.0.1 h1:7GWB9n3DdnO3TIbj59wMAE9QcHPL4cy/Bbtk5P1Noow=
github.com/jackc/pgtype v1.0.1/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
github.com/jackc/pgx v0.0.0-20180217033919-55ca9db5d578/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
@ -138,8 +126,7 @@ github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0f
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.0.0 h1:rbjAshlgKscNa7j0jAM0uNQflis5o2XUogPMVAwtcsM=
github.com/jackc/puddle v1.0.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/tern v1.8.2 h1:+d9eK83fRS0dbf6nt+2tjILYF4FKG1O5xTFB8Lzc66U=
github.com/jackc/tern v1.8.2/go.mod h1:AMppp2oyCT6rYnJHLLMmPWwahfFvdIVi6mr9gH81Nxs=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@ -163,23 +150,25 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pkg/errors v0.0.0-20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -199,6 +188,7 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
@ -208,22 +198,16 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.0-20160114030619-9c9300901990/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20151218134703-7f60f83a2c81/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@ -247,8 +231,6 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
@ -258,7 +240,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20151201002508-7b85b097bf75/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -303,8 +284,8 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611 h1:q9u40nxWT5zRClI/uU9dHCiYGottAg6Nzz4YUQyHxdA=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@ -335,4 +316,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

jsn/bench.0 Normal file

@ -0,0 +1,9 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/jsn
BenchmarkGet-8 13310 88437 ns/op 3328 B/op 2 allocs/op
BenchmarkFilter-8 182232 6922 ns/op 448 B/op 1 allocs/op
BenchmarkStrip-8 162709 6560 ns/op 224 B/op 1 allocs/op
BenchmarkReplace-8 85846 13996 ns/op 416 B/op 1 allocs/op
PASS
ok github.com/dosco/super-graph/jsn 5.913s
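
For context on how figures like these are produced: below is a minimal benchmark sketch against the Filter signature shown in the diffs that follow ((w *bytes.Buffer, b []byte, keys []string) error); the input document here is a stand-in, so the absolute numbers will differ from the ones above.

package jsn_test

import (
	"bytes"
	"testing"

	"github.com/dosco/super-graph/jsn"
)

// BenchmarkFilterSketch reports ns/op, B/op and allocs/op like the
// figures above; run with: go test -bench=. -benchmem ./jsn
func BenchmarkFilterSketch(b *testing.B) {
	input := []byte(`[{"id":1,"full_name":"Jane Doe","email":"jane@example.com"}]`)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		var buf bytes.Buffer
		_ = jsn.Filter(&buf, input, []string{"id"})
	}
}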


@ -64,7 +64,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
state = expectKeyClose
s = i
case state == expectKeyClose && b[i] == '"':
case state == expectKeyClose && (b[i-1] != '\\' && b[i] == '"'):
state = expectColon
k = b[(s + 1):i]
@ -74,7 +74,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
case state == expectValue && b[i] == '"':
state = expectString
case state == expectString && b[i] == '"':
case state == expectString && (b[i-1] != '\\' && b[i] == '"'):
e = i
case state == expectValue && b[i] == '[':
@ -139,7 +139,7 @@ func Filter(w *bytes.Buffer, b []byte, keys []string) error {
}
if sk > 0 && sk < len(cb) {
_, err = w.Write(cb[sk:len(cb)])
_, err = w.Write(cb[sk:])
} else {
_, err = w.Write(cb)
}
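
The change repeated across Filter, Get, Keys, Replace and Strip in these diffs is the same one-byte lookback: a '"' only terminates a key or string when the preceding byte is not a backslash, so escaped quotes inside values no longer break the scan. A self-contained sketch of the idea (note the single lookback deliberately mis-handles a string ending in a literal escaped backslash, `\\"` — a speed-for-correctness trade-off):

package main

import "fmt"

// stringEnd returns the index of the closing quote of the JSON string
// starting at b[start], skipping quotes preceded by a backslash.
func stringEnd(b []byte, start int) int {
	for i := start + 1; i < len(b); i++ {
		if b[i] == '"' && b[i-1] != '\\' {
			return i
		}
	}
	return -1
}

func main() {
	s := []byte(`"say \"hi\"" rest`)
	fmt.Println(string(s[:stringEnd(s, 0)+1])) // "say \"hi\""
}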


@ -55,6 +55,6 @@ func TestFuzzCrashers(t *testing.T) {
}
for _, f := range crashers {
unifiedTest([]byte(f))
_ = unifiedTest([]byte(f))
}
}


@ -66,7 +66,7 @@ func Get(b []byte, keys [][]byte) []Field {
state = expectKeyClose
s = i
case state == expectKeyClose && b[i] == '"':
case state == expectKeyClose && (b[i-1] != '\\' && b[i] == '"'):
state = expectColon
k = b[(s + 1):i]
@ -77,7 +77,7 @@ func Get(b []byte, keys [][]byte) []Field {
state = expectString
s = i
case state == expectString && b[i] == '"':
case state == expectString && (b[i-1] != '\\' && b[i] == '"'):
e = i
case state == expectValue && b[i] == '[':
@ -130,6 +130,20 @@ func Get(b []byte, keys [][]byte) []Field {
n++
}
if state == expectListClose {
loop:
for j := i + 1; j < len(b); j++ {
switch b[j] {
case ' ', '\t', '\n':
continue
case '{':
break loop
}
i = e
break loop
}
}
state = expectKey
e = 0
}
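
The new loop: block added to Get is a lookahead: once a list closes, the scanner peeks past whitespace, and unless the next significant byte opens an object it rewinds i to the recorded string end e. A hypothetical helper (not part of jsn) that captures just the peeking step:

package main

import "fmt"

// nextSignificant returns the next non-whitespace byte at or after i,
// mirroring the whitespace cases (' ', '\t', '\n') the diff skips over.
func nextSignificant(b []byte, i int) (byte, int) {
	for j := i; j < len(b); j++ {
		switch b[j] {
		case ' ', '\t', '\n':
			continue
		}
		return b[j], j
	}
	return 0, -1
}

func main() {
	c, j := nextSignificant([]byte(`]  , {"id":2}`), 1)
	fmt.Printf("%c at %d\n", c, j) // , at 3
}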


@ -9,16 +9,16 @@ var (
input1 = `
{
"data": {
"test": { "__twitter_id": "ABCD" },
"test_1a": { "__twitter_id": "ABCD" },
"users": [
{
"id": 1,
"full_name": "Sidney Stroman",
"full_name": "'Sidney St[1]roman'",
"email": "user0@demo.com",
"__twitter_id": "2048666903444506956",
"embed": {
"id": 8,
"full_name": "Caroll Orn Sr.",
"full_name": "Caroll Orn Sr's",
"email": "joannarau@hegmann.io",
"__twitter_id": "ABC123"
"more": [{
@ -37,7 +37,7 @@ var (
"id": 3,
"full_name": "Kenna Cassin",
"email": "user2@demo.com",
"__twitter_id": { "name": "hello", "address": { "work": "1 infinity loop" } }
"__twitter_id": { "name": "\"hellos\"", "address": { "work": "1 infinity loop" } }
},
{
"id": 4,
@ -108,7 +108,7 @@ var (
input2 = `
[{
"id": 1,
"full_name": "Sidney Stroman",
"full_name": "Sidney St[1]roman",
"email": "user0@demo.com",
"__twitter_id": "2048666903444506956",
"something": null,
@ -130,7 +130,7 @@ var (
input3 = `
{
"data": {
"test": { "__twitter_id": "ABCD" },
"test_1a": { "__twitter_id": "ABCD" },
"users": [{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]
}
}`
@ -138,7 +138,7 @@ var (
input4 = `
{ "users" : [{
"id": 1,
"full_name": "Sidney Stroman",
"full_name": "Sidney St[1]roman",
"email": "user0@demo.com",
"__twitter_id": "2048666903444506956",
"embed": {
@ -155,24 +155,26 @@ var (
"email": "user1@demo.com",
"__twitter_id": [{ "name": "hello" }, { "name": "world"}]
}] }`
input5 = `
{"data":{"title":"In September 2018, Slovak police stated that Kuciak was murdered because of his investigative work, and that the murder had been ordered.[9][10] They arrested eight suspects,[11] charging three of them with first-degree murder.[11]","topics":["cpp"]},"a":["1111"]},"thread_slug":"in-september-2018-slovak-police-stated-that-kuciak-7929",}`
)
func TestGet(t *testing.T) {
values := Get([]byte(input1), [][]byte{
[]byte("test_1a"),
[]byte("__twitter_id"),
[]byte("work_email"),
})
expected := []Field{
{[]byte("test_1a"), []byte(`{ "__twitter_id": "ABCD" }`)},
{[]byte("__twitter_id"), []byte(`"ABCD"`)},
{[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
{[]byte("__twitter_id"), []byte(`"ABC123"`)},
{[]byte("__twitter_id"), []byte(`"more123"`)},
{[]byte("__twitter_id"),
[]byte(`[{ "name": "hello" }, { "name": "world"}]`)},
{[]byte("__twitter_id"),
[]byte(`{ "name": "hello", "address": { "work": "1 infinity loop" } }`),
},
{[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
{[]byte("__twitter_id"), []byte(`{ "name": "\"hellos\"", "address": { "work": "1 infinity loop" } }`)},
{[]byte("__twitter_id"), []byte(`1234567890`)},
{[]byte("__twitter_id"), []byte(`1.23E`)},
{[]byte("__twitter_id"), []byte(`true`)},
@ -191,11 +193,35 @@ func TestGet(t *testing.T) {
}
for i := range expected {
if bytes.Equal(values[i].Key, expected[i].Key) == false {
if !bytes.Equal(values[i].Key, expected[i].Key) {
t.Error(string(values[i].Key), " != ", string(expected[i].Key))
}
if bytes.Equal(values[i].Value, expected[i].Value) == false {
if !bytes.Equal(values[i].Value, expected[i].Value) {
t.Error(string(values[i].Value), " != ", string(expected[i].Value))
}
}
}
func TestGet1(t *testing.T) {
values := Get([]byte(input5), [][]byte{
[]byte("thread_slug"),
})
expected := []Field{
{[]byte("thread_slug"), []byte(`"in-september-2018-slovak-police-stated-that-kuciak-7929"`)},
}
if len(values) != len(expected) {
t.Fatal("len(values) != len(expected)")
}
for i := range expected {
if !bytes.Equal(values[i].Key, expected[i].Key) {
t.Error(string(values[i].Key), " != ", string(expected[i].Key))
}
if !bytes.Equal(values[i].Value, expected[i].Value) {
t.Error(string(values[i].Value), " != ", string(expected[i].Value))
}
}
@ -225,9 +251,12 @@ func TestValue(t *testing.T) {
func TestFilter1(t *testing.T) {
var b bytes.Buffer
Filter(&b, []byte(input2), []string{"id", "full_name", "embed"})
err := Filter(&b, []byte(input2), []string{"id", "full_name", "embed"})
if err != nil {
t.Error(err)
}
expected := `[{"id": 1,"full_name": "Sidney Stroman","embed": {"id": 8,"full_name": "Caroll Orn Sr.","email": "joannarau@hegmann.io","__twitter_id": "ABC123"}},{"id": 2,"full_name": "Jerry Dickinson"}]`
expected := `[{"id": 1,"full_name": "Sidney St[1]roman","embed": {"id": 8,"full_name": "Caroll Orn Sr.","email": "joannarau@hegmann.io","__twitter_id": "ABC123"}},{"id": 2,"full_name": "Jerry Dickinson"}]`
if b.String() != expected {
t.Error("Does not match expected json")
@ -238,7 +267,10 @@ func TestFilter2(t *testing.T) {
value := `[{"id":1,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":100,"amount_refunded":0,"date":"01/01/2019","application":null,"billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}, {"id":2,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":0,"date":"02/18/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}},{"id":3,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":50,"date":"03/21/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}]`
var b bytes.Buffer
Filter(&b, []byte(value), []string{"id"})
err := Filter(&b, []byte(value), []string{"id"})
if err != nil {
t.Error(err)
}
expected := `[{"id":1},{"id":2},{"id":3}]`
@ -253,7 +285,7 @@ func TestStrip(t *testing.T) {
expected := []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)
if bytes.Equal(value1, expected) == false {
if !bytes.Equal(value1, expected) {
t.Log(value1)
t.Error("[Valid path] Does not match expected json")
}
@ -261,7 +293,7 @@ func TestStrip(t *testing.T) {
path2 := [][]byte{[]byte("boo"), []byte("hoo")}
value2 := Strip([]byte(input3), path2)
if bytes.Equal(value2, []byte(input3)) == false {
if !bytes.Equal(value2, []byte(input3)) {
t.Log(value2)
t.Error("[Invalid path] Does not match expected json")
}
@ -300,7 +332,7 @@ func TestReplace(t *testing.T) {
expected := `{ "users" : [{
"id": 1,
"full_name": "Sidney Stroman",
"full_name": "Sidney St[1]roman",
"email": "user0@demo.com",
"__twitter_id": "2048666903444506956",
"embed": {
@ -332,7 +364,7 @@ func TestReplace(t *testing.T) {
func TestReplaceEmpty(t *testing.T) {
var buf bytes.Buffer
json := `{ "users" : [{"id":1,"full_name":"Sidney Stroman","email":"user0@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":2,"full_name":"Jerry Dickinson","email":"user1@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":3,"full_name":"Kenna Cassin","email":"user2@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":4,"full_name":"Mr. Pat Parisian","email":"rodney@kautzer.biz","__users_twitter_id":"2048666903444506956"}, {"id":5,"full_name":"Bette Ebert","email":"janeenrath@goyette.com","__users_twitter_id":"2048666903444506956"}, {"id":6,"full_name":"Everett Kiehn","email":"michael@bartoletti.com","__users_twitter_id":"2048666903444506956"}, {"id":7,"full_name":"Katrina Cronin","email":"loretaklocko@framivolkman.org","__users_twitter_id":"2048666903444506956"}, {"id":8,"full_name":"Caroll Orn Sr.","email":"joannarau@hegmann.io","__users_twitter_id":"2048666903444506956"}, {"id":9,"full_name":"Gwendolyn Ziemann","email":"renaytoy@rutherford.co","__users_twitter_id":"2048666903444506956"}, {"id":10,"full_name":"Mrs. Rosann Fritsch","email":"holliemosciski@thiel.org","__users_twitter_id":"2048666903444506956"}, {"id":11,"full_name":"Arden Koss","email":"cristobalankunding@howewelch.org","__users_twitter_id":"2048666903444506956"}, {"id":12,"full_name":"Brenton Bauch PhD","email":"renee@miller.co","__users_twitter_id":"2048666903444506956"}, {"id":13,"full_name":"Daine Gleichner","email":"andrea@nienow.co","__users_twitter_id":"2048666903444506956"}] }`
json := `{ "users" : [{"id":1,"full_name":"Sidney St[1]roman","email":"user0@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":2,"full_name":"Jerry Dickinson","email":"user1@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":3,"full_name":"Kenna Cassin","email":"user2@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":4,"full_name":"Mr. Pat Parisian","email":"rodney@kautzer.biz","__users_twitter_id":"2048666903444506956"}, {"id":5,"full_name":"Bette Ebert","email":"janeenrath@goyette.com","__users_twitter_id":"2048666903444506956"}, {"id":6,"full_name":"Everett Kiehn","email":"michael@bartoletti.com","__users_twitter_id":"2048666903444506956"}, {"id":7,"full_name":"Katrina Cronin","email":"loretaklocko@framivolkman.org","__users_twitter_id":"2048666903444506956"}, {"id":8,"full_name":"Caroll Orn Sr.","email":"joannarau@hegmann.io","__users_twitter_id":"2048666903444506956"}, {"id":9,"full_name":"Gwendolyn Ziemann","email":"renaytoy@rutherford.co","__users_twitter_id":"2048666903444506956"}, {"id":10,"full_name":"Mrs. Rosann Fritsch","email":"holliemosciski@thiel.org","__users_twitter_id":"2048666903444506956"}, {"id":11,"full_name":"Arden Koss","email":"cristobalankunding@howewelch.org","__users_twitter_id":"2048666903444506956"}, {"id":12,"full_name":"Brenton Bauch PhD","email":"renee@miller.co","__users_twitter_id":"2048666903444506956"}, {"id":13,"full_name":"Daine Gleichner","email":"andrea@nienow.co","__users_twitter_id":"2048666903444506956"}] }`
err := Replace(&buf, []byte(json), []Field{}, []Field{})
if err != nil {
@ -374,10 +406,6 @@ func TestKeys2(t *testing.T) {
"id", "posts", "title", "description", "full_name", "email", "books", "name", "description",
}
// for i := range fields {
// fmt.Println("-->", string(fields[i]))
// }
if len(exp) != len(fields) {
t.Errorf("Expected %d fields %d", len(exp), len(fields))
}
@ -393,7 +421,7 @@ func TestKeys3(t *testing.T) {
json := `{
"insert": {
"created_at": "now",
"test": { "type1": "a", "type2": "b" },
"test_1a": { "type1": "a", "type2": "b" },
"name": "Hello",
"updated_at": "now",
"description": "World"
@ -404,7 +432,7 @@ func TestKeys3(t *testing.T) {
fields := Keys([]byte(json))
exp := []string{
"insert", "created_at", "test", "type1", "type2", "name", "updated_at", "description",
"insert", "created_at", "test_1a", "type1", "type2", "name", "updated_at", "description",
"user",
}
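
Taken together, the tests above double as documentation for the package's zero-unmarshal API; here is a minimal usage sketch based on the Get signature and the exported Field{Key, Value} shape they exercise:

package main

import (
	"fmt"

	"github.com/dosco/super-graph/jsn"
)

func main() {
	doc := []byte(`{"users":[{"id":1,"email":"a@b.co"},{"id":2,"email":"c@d.co"}]}`)
	// Collect every value for the given keys, anywhere in the document,
	// without fully unmarshalling it.
	for _, f := range jsn.Get(doc, [][]byte{[]byte("email")}) {
		fmt.Printf("%s = %s\n", f.Key, f.Value)
	}
}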


@ -47,7 +47,7 @@ func Keys(b []byte) [][]byte {
state = expectKeyClose
s = i
case state == expectKeyClose && b[i] == '"':
case state == expectKeyClose && (b[i-1] != '\\' && b[i] == '"'):
state = expectColon
k = b[(s + 1):i]
@ -58,7 +58,7 @@ func Keys(b []byte) [][]byte {
state = expectString
s = i
case state == expectString && b[i] == '"':
case state == expectString && (b[i-1] != '\\' && b[i] == '"'):
e = i
case state == expectValue && b[i] == '{':
@ -111,6 +111,19 @@ func Keys(b []byte) [][]byte {
res = append(res, k)
}
if state == expectListClose {
loop:
for j := i + 1; j < len(b); j++ {
switch b[j] {
case ' ', '\t', '\n':
continue
case '{':
break loop
}
i = e
break loop
}
}
state = expectKey
k = nil
e = 0


@ -16,8 +16,12 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
tmap := make(map[uint64]int, len(from))
for i, f := range from {
h.Write(f.Key)
h.Write(f.Value)
if _, err := h.Write(f.Key); err != nil {
return err
}
if _, err := h.Write(f.Value); err != nil {
return err
}
tmap[h.Sum64()] = i
h.Reset()
@ -48,9 +52,11 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
state = expectKeyClose
s = i
case state == expectKeyClose && b[i] == '"':
case state == expectKeyClose && (b[i-1] != '\\' && b[i] == '"'):
state = expectColon
h.Write(b[(s + 1):i])
if _, err := h.Write(b[(s + 1):i]); err != nil {
return err
}
we = s
case state == expectColon && b[i] == ':':
@ -60,7 +66,7 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
state = expectString
s = i
case state == expectString && b[i] == '"':
case state == expectString && (b[i-1] != '\\' && b[i] == '"'):
e = i
case state == expectValue && b[i] == '[':
@ -106,7 +112,9 @@ func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
if e != 0 {
e++
h.Write(b[s:e])
if _, err := h.Write(b[s:e]); err != nil {
return err
}
n, ok := tmap[h.Sum64()]
h.Reset()
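
A note on the added error checks in Replace: the hash.Hash contract documents that Write never returns an error, so these checks exist chiefly to satisfy errcheck. The lookup scheme itself hashes key and value together and maps the 64-bit sum to the replacement's index; a sketch using FNV-1a as a stand-in, since the hunk does not show which 64-bit hash h actually is:

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	tmap := make(map[uint64]int)
	h := fnv.New64a()
	h.Write([]byte("full_name")) // field key
	h.Write([]byte(`"Jane"`))    // field value
	tmap[h.Sum64()] = 0          // sum -> index of the replacement Field
	h.Reset()
	fmt.Println(tmap)
}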


@ -27,7 +27,7 @@ func Strip(b []byte, path [][]byte) []byte {
state = expectKeyClose
s = i
case state == expectKeyClose && b[i] == '"':
case state == expectKeyClose && (b[i-1] != '\\' && b[i] == '"'):
state = expectColon
if pi == len(path) {
pi = 0
@ -44,7 +44,7 @@ func Strip(b []byte, path [][]byte) []byte {
state = expectString
s = i
case state == expectString && b[i] == '"':
case state == expectString && (b[i-1] != '\\' && b[i] == '"'):
e = i
case state == expectValue && b[i] == '[':


@ -5,7 +5,7 @@ import (
"encoding/json"
)
func Tree(v []byte) (map[string]interface{}, bool, error) {
func Tree(v []byte) (map[string]json.RawMessage, bool, error) {
dec := json.NewDecoder(bytes.NewReader(v))
array := false
@ -25,7 +25,7 @@ func Tree(v []byte) (map[string]interface{}, bool, error) {
}
// while the array contains values
var m map[string]interface{}
var m map[string]json.RawMessage
// decode an array value (Message)
err := dec.Decode(&m)
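
Switching Tree's map values from interface{} to json.RawMessage means each top-level value is kept as its original bytes rather than decoded into generic Go values, so number formatting, escapes and nesting survive untouched and no re-marshalling is needed. A quick illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var m map[string]json.RawMessage
	if err := json.Unmarshal([]byte(`{"a":[1,2,3],"b":{"x":1.50}}`), &m); err != nil {
		panic(err)
	}
	// Raw bytes are preserved: 1.50 is not rewritten to 1.5.
	fmt.Println(string(m["b"]))
}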


@ -2,7 +2,6 @@ package jsn
import (
"fmt"
"reflect"
"strconv"
"strings"
"unsafe"
@ -333,15 +332,6 @@ func b2s(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
func s2b(s string) []byte {
strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
var sh reflect.SliceHeader
sh.Data = strh.Data
sh.Len = strh.Len
sh.Cap = strh.Len
return *(*[]byte)(unsafe.Pointer(&sh))
}
const maxStartEndStringLen = 80
func startEndString(s string) string {
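
The deleted s2b leaves only b2s, which reinterprets a byte slice as a string without copying; the cost is aliasing, so the slice must never be mutated afterwards. A self-contained demonstration of the hazard:

package main

import (
	"fmt"
	"unsafe"
)

// b2s is the same zero-copy conversion kept in the diff above.
func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	b := []byte("abc")
	s := b2s(b)
	b[0] = 'x'
	fmt.Println(s) // "xbc": the string aliases the slice's backing array
}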


@ -5,5 +5,5 @@ import (
)
func main() {
serv.Init()
serv.Cmd()
}


@ -16,7 +16,7 @@ import (
"github.com/pkg/errors"
)
var migrationPattern = regexp.MustCompile(`\A(\d+)_.+\.sql\z`)
var migrationPattern = regexp.MustCompile(`\A(\d+)_[^\.]+\.sql\z`)
var ErrNoFwMigration = errors.Errorf("no sql in forward migration step")
@ -127,7 +127,7 @@ func FindMigrationsEx(path string, fs MigratorFS) ([]string, error) {
return nil, err
}
mcount := len(paths) + 100
mcount := len(paths)
if n < int64(mcount) {
return nil, fmt.Errorf("Duplicate migration %d", n)
@ -244,7 +244,6 @@ func (m *Migrator) AppendMigration(name, upSQL, downSQL string) {
UpSQL: upSQL,
DownSQL: downSQL,
})
return
}
// Migrate runs pending migrations
@ -315,7 +314,7 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
if err != nil {
return err
}
defer tx.Rollback(ctx)
defer tx.Rollback(ctx) //nolint: errcheck
// Fire on start callback
if m.OnStart != nil {
@ -332,7 +331,9 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
}
// Reset all database connection settings. Important to do before updating version as search_path may have been changed.
tx.Exec(ctx, "reset all")
if _, err := tx.Exec(ctx, "reset all"); err != nil {
return err
}
// Add one to the version
_, err = tx.Exec(ctx, "update "+m.versionTable+" set version=$1", sequence)
@ -352,16 +353,14 @@ func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
}
func (m *Migrator) GetCurrentVersion() (v int32, err error) {
ctx := context.Background()
err = m.conn.QueryRow(context.Background(),
"select version from "+m.versionTable).Scan(&v)
err = m.conn.QueryRow(ctx, "select version from "+m.versionTable).Scan(&v)
return v, err
}
func (m *Migrator) ensureSchemaVersionTableExists() (err error) {
ctx := context.Background()
_, err = m.conn.Exec(ctx, fmt.Sprintf(`
_, err = m.conn.Exec(context.Background(), fmt.Sprintf(`
create table if not exists %s(version int4 not null);
insert into %s(version)

psql/fuzz.go Normal file

@ -0,0 +1,54 @@
// +build gofuzz
package psql
import (
"encoding/json"
"github.com/dosco/super-graph/qcode"
)
var (
qcompileTest, _ = qcode.NewCompiler(qcode.Config{})
schema = getTestSchema()
vars = NewVariables(map[string]string{
"admin_account_id": "5",
})
pcompileTest = NewCompiler(Config{
Schema: schema,
Vars: vars,
})
)
// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
qc, err := qcompileTest.Compile([]byte(gql), "user")
if err != nil {
panic("qcompile can't fail")
}
vars := map[string]json.RawMessage{
"data": json.RawMessage(data),
}
_, _, err = pcompileTest.CompileEx(qc, vars)
if err != nil {
return 0
}
return 1
}
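
psql/fuzz.go follows the go-fuzz/Fuzzbuzz contract: the gofuzz build tag keeps it out of normal builds, and the return value steers the fuzzer — 1 marks the input as interesting, 0 as not, and a panic flags a real bug. A stripped-down sketch of that contract against a hypothetical target:

// +build gofuzz

package demo

import "encoding/json"

// Fuzz returns 1 when the input was accepted, guiding the fuzzer to
// mutate it further, and 0 when it was rejected outright.
func Fuzz(data []byte) int {
	var v map[string]json.RawMessage
	if err := json.Unmarshal(data, &v); err != nil {
		return 0
	}
	return 1
}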

psql/insert.go Normal file

@ -0,0 +1,196 @@
//nolint:errcheck
package psql
import (
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
)
func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
insert, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
}
if len(insert) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
io.WriteString(c.w, qc.ActionVar)
io.WriteString(c.w, `}}' :: json AS j)`)
st := util.NewStack()
st.Push(kvitem{_type: itemInsert, key: ti.Name, val: insert, ti: ti})
for {
if st.Len() == 0 {
break
}
if insert[0] == '[' && st.Len() > 1 {
return 0, errors.New("nested bulk insert not supported")
}
intf := st.Pop()
switch item := intf.(type) {
case kvitem:
if err := c.handleKVItem(st, item); err != nil {
return 0, err
}
case renitem:
var err error
// if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
// io.WriteString(c.w, ` WHERE false`)
// }
switch item._type {
case itemInsert:
err = c.renderInsertStmt(qc, w, item)
case itemConnect:
err = c.renderConnectStmt(qc, w, item)
case itemUnion:
err = c.renderUnionStmt(w, item)
}
if err != nil {
return 0, err
}
}
}
io.WriteString(c.w, ` `)
return 0, nil
}
func (c *compilerContext) renderInsertStmt(qc *qcode.QCode, w io.Writer, item renitem) error {
ti := item.ti
jt := item.data
sk := nestedInsertRelColumnsMap(item.kvitem)
io.WriteString(c.w, `, `)
renderCteName(w, item.kvitem)
io.WriteString(w, ` AS (`)
io.WriteString(w, `INSERT INTO `)
quoted(w, ti.Name)
io.WriteString(w, ` (`)
renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
renderNestedInsertRelColumns(w, item.kvitem, false)
io.WriteString(w, `)`)
io.WriteString(w, ` SELECT `)
renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
renderNestedInsertRelColumns(w, item.kvitem, true)
io.WriteString(w, ` FROM "_sg_input" i, `)
renderNestedInsertRelTables(w, item.kvitem)
if item.array {
io.WriteString(w, `json_populate_recordset`)
} else {
io.WriteString(w, `json_populate_record`)
}
io.WriteString(w, `(NULL::`)
io.WriteString(w, ti.Name)
if len(item.path) == 0 {
io.WriteString(w, `, i.j) t RETURNING *)`)
} else {
io.WriteString(w, `, i.j->`)
joinPath(w, item.path)
io.WriteString(w, `) t RETURNING *)`)
}
return nil
}
func nestedInsertRelColumnsMap(item kvitem) map[string]struct{} {
sk := make(map[string]struct{}, len(item.items))
if len(item.items) == 0 {
if item.relPC != nil && item.relPC.Type == RelOneToMany {
sk[item.relPC.Right.Col] = struct{}{}
}
} else {
for _, v := range item.items {
if v.relCP.Type == RelOneToMany {
sk[v.relCP.Right.Col] = struct{}{}
}
}
}
return sk
}
func renderNestedInsertRelColumns(w io.Writer, item kvitem, values bool) error {
if len(item.items) == 0 {
if item.relPC != nil && item.relPC.Type == RelOneToMany {
if values {
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
} else {
quoted(w, item.relPC.Right.Col)
}
}
} else {
// Render child foreign key columns if child-to-parent
// relationship is one-to-many
i := 0
for _, v := range item.items {
if v.relCP.Type == RelOneToMany {
if i != 0 {
io.WriteString(w, `, `)
}
if values {
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
} else {
quoted(w, v.relCP.Right.Col)
}
i++
}
}
}
return nil
}
func renderNestedInsertRelTables(w io.Writer, item kvitem) error {
if len(item.items) == 0 {
if item.relPC != nil && item.relPC.Type == RelOneToMany {
quoted(w, item.relPC.Left.Table)
io.WriteString(w, `, `)
}
} else {
// Render tables needed to set values if child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v.relCP.Type == RelOneToMany {
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
} else {
quoted(w, v.relCP.Left.Table)
io.WriteString(w, `, `)
}
}
}
}
return nil
}
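
The shape renderInsert produces is worth seeing once in isolation: the mutation payload is parked in a _sg_input CTE as json, and each INSERT selects its typed columns out of it via json_populate_record (json_populate_recordset for array payloads). A hand-written miniature of the generated statement for a single-table insert — the trailing select is simplified here; the tests below show real output:

package main

import "fmt"

// miniature mirrors the CTE prefix the compiler emits; {{data}} is the
// placeholder later bound to the mutation variable.
const miniature = `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), ` +
	`"users" AS (INSERT INTO "users" ("full_name", "email") ` +
	`SELECT "t"."full_name", "t"."email" ` +
	`FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *) ` +
	`SELECT * FROM "users"`

func main() { fmt.Println(miniature) }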

psql/insert_test.go Normal file

@ -0,0 +1,366 @@
package psql
import (
"encoding/json"
"testing"
)
func simpleInsert(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func singleInsert(t *testing.T) {
gql := `mutation {
product(id: 15, insert: $insert) {
id
name
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description", "price", "user_id") SELECT "t"."name", "t"."description", "t"."price", "t"."user_id" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` { "name": "my_name", "price": 6.95, "description": "my_desc", "user_id": 5 }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func bulkInsert(t *testing.T) {
gql := `mutation {
product(name: "test", id: 15, insert: $insert) {
id
name
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{insert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func simpleInsertWithPresets(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, '{{user_id}}' :: bigint FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Tomato", "price": 5.76}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedInsertManyToMany(t *testing.T) {
gql := `mutation {
purchase(insert: $data) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "product_id", "customer_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "products"."id", "customers"."id" FROM "_sg_input" i, "products", "customers", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT json_object_agg('purchase', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "product_1_join"."json_1" AS "product", "customer_2_join"."json_2" AS "customer") AS "json_row_0")) AS "json_0" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email") AS "json_row_2")) AS "json_2" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2" LIMIT ('1') :: integer) AS "customer_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "price") SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t RETURNING *), "customers" AS (INSERT INTO "customers" ("full_name", "email") SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t RETURNING *), "purchases" AS (INSERT INTO "purchases" ("sale_type", "quantity", "due_date", "customer_id", "product_id") SELECT "t"."sale_type", "t"."quantity", "t"."due_date", "customers"."id", "products"."id" FROM "_sg_input" i, "customers", "products", json_populate_record(NULL::purchases, i.j) t RETURNING *) SELECT json_object_agg('purchase', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "product_1_join"."json_1" AS "product", "customer_2_join"."json_2" AS "customer") AS "json_row_0")) AS "json_0" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email") AS "json_row_2")) AS "json_2" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2" LIMIT ('1') :: integer) AS "customer_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(` {
"sale_type": "bought",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
`),
}
for i := 0; i < 1000; i++ {
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql1 && string(resSQL) != sql2 {
t.Fatal(errNotExpected)
}
}
}
func nestedInsertOneToMany(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j->'product') t RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "product_1_join"."json_1" AS "product") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedInsertOneToOne(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t RETURNING *), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"hey": {
"now": "what's the matter"
},
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now"
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedInsertOneToManyWithConnect(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
full_name
email
product {
id
name
price
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (INSERT INTO "users" ("full_name", "email", "created_at", "updated_at") SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t RETURNING *), "products" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "product_1_join"."json_1" AS "product") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": { "id": 5 }
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedInsertOneToOneWithConnect(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
tags {
id
name
}
user {
id
full_name
email
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user", "tags_2_join"."json_2" AS "tags") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."id" AS "id", "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_0"."tags"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": { "id": 5 }
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedInsertOneToOneWithConnectArray(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
name
user {
id
full_name
email
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id" = ANY((select a::bigint AS list from json_array_elements_text((i.j->'user'->'connect'->>'id')::json) AS a)) LIMIT 1), "products" AS (INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"connect": { "id": [1,2] }
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func TestCompileInsert(t *testing.T) {
t.Run("simpleInsert", simpleInsert)
t.Run("singleInsert", singleInsert)
t.Run("bulkInsert", bulkInsert)
t.Run("simpleInsertWithPresets", simpleInsertWithPresets)
t.Run("nestedInsertManyToMany", nestedInsertManyToMany)
t.Run("nestedInsertOneToMany", nestedInsertOneToMany)
t.Run("nestedInsertOneToOne", nestedInsertOneToOne)
t.Run("nestedInsertOneToManyWithConnect", nestedInsertOneToManyWithConnect)
t.Run("nestedInsertOneToOneWithConnect", nestedInsertOneToOneWithConnect)
t.Run("nestedInsertOneToOneWithConnectArray", nestedInsertOneToOneWithConnectArray)
}
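
One test above deserves a note: nestedInsertManyToMany compiles the same mutation 1000 times and accepts either of two SQL strings, presumably because the nested children come out of a Go map and map iteration order is randomized, so either CTE ordering is valid output. The underlying behavior:

package main

import "fmt"

func main() {
	m := map[string]int{"customer": 1, "product": 2}
	for k := range m {
		fmt.Println(k) // order varies between runs
	}
}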


@ -1,14 +1,36 @@
//nolint:errcheck
package psql
import (
"encoding/json"
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
)
type itemType int
const (
itemInsert itemType = iota + 1
itemUpdate
itemConnect
itemDisconnect
itemUnion
)
var insertTypes = map[string]itemType{
"connect": itemConnect,
}
var updateTypes = map[string]itemType{
"connect": itemConnect,
"disconnect": itemDisconnect,
}
var noLimit = qcode.Paging{NoLimit: true}
func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables) (uint32, error) {
@ -19,15 +41,11 @@ func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables
c := &compilerContext{w, qc.Selects, co}
root := &qc.Selects[0]
ti, err := c.schema.GetTable(root.Table)
ti, err := c.schema.GetTable(root.Name)
if err != nil {
return 0, err
}
io.WriteString(c.w, `WITH `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` AS `)
switch qc.Type {
case qcode.QTInsert:
if _, err := c.renderInsert(qc, w, vars, ti); err != nil {
@ -53,8 +71,6 @@ func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables
return 0, errors.New("valid mutations are 'insert', 'update', 'upsert' and 'delete'")
}
io.WriteString(c.w, ` RETURNING *) `)
root.Paging = noLimit
root.DistinctOn = root.DistinctOn[:]
root.OrderBy = root.OrderBy[:]
@ -64,158 +80,363 @@ func (co *Compiler) compileMutation(qc *qcode.QCode, w io.Writer, vars Variables
return c.compileQuery(qc, w)
}
func (c *compilerContext) renderInsert(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
insert, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not defined", qc.ActionVar)
}
jt, array, err := jsn.Tree(insert)
if err != nil {
return 0, err
}
io.WriteString(c.w, `(WITH "input" AS (SELECT {{`)
io.WriteString(c.w, qc.ActionVar)
io.WriteString(c.w, `}}::json AS j) INSERT INTO `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` (`)
c.renderInsertUpdateColumns(qc, w, jt, ti, false)
io.WriteString(c.w, `)`)
io.WriteString(c.w, ` SELECT `)
c.renderInsertUpdateColumns(qc, w, jt, ti, true)
io.WriteString(c.w, ` FROM input i, `)
if array {
io.WriteString(c.w, `json_populate_recordset`)
} else {
io.WriteString(c.w, `json_populate_record`)
}
io.WriteString(c.w, `(NULL::`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `, i.j) t`)
if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
io.WriteString(c.w, ` WHERE false`)
}
return 0, nil
type kvitem struct {
id int32
_type itemType
_ctype int
key string
path []string
val json.RawMessage
data map[string]json.RawMessage
array bool
ti *DBTableInfo
relCP *DBRel
relPC *DBRel
items []kvitem
}
func (c *compilerContext) renderInsertUpdateColumns(qc *qcode.QCode, w io.Writer,
jt map[string]interface{}, ti *DBTableInfo, values bool) (uint32, error) {
root := &qc.Selects[0]
type renitem struct {
kvitem
array bool
data map[string]json.RawMessage
}
i := 0
for _, cn := range ti.ColumnNames {
if _, ok := jt[cn]; !ok {
// TODO: Handle cases where a column name matches the child table name;
// the child path needs to be excluded in the json sent to insert or update.
func (c *compilerContext) handleKVItem(st *util.Stack, item kvitem) error {
var data map[string]json.RawMessage
var array bool
var err error
if item.data == nil {
data, array, err = jsn.Tree(item.val)
if err != nil {
return err
}
} else {
data, array = item.data, item.array
}
var unionize bool
id := item.id + 1
item.items = make([]kvitem, 0, len(data))
for k, v := range data {
if v[0] != '{' && v[0] != '[' {
continue
}
if _, ok := root.PresetMap[cn]; ok {
// Get child-to-parent relationship
relCP, err := c.schema.GetRel(k, item.key)
if err != nil {
var ty itemType
var ok bool
switch item._type {
case itemInsert:
ty, ok = insertTypes[k]
case itemUpdate:
ty, ok = updateTypes[k]
}
if ok {
unionize = true
item1 := item
item1._type = ty
item1.id = id
item1.val = v
item.items = append(item.items, item1)
id++
}
// Get parent-to-child relationship
} else if relPC, err := c.schema.GetRel(item.key, k); err == nil {
ti, err := c.schema.GetTable(k)
if err != nil {
return err
}
item1 := kvitem{
id: id,
_type: item._type,
key: k,
val: v,
path: append(item.path, k),
ti: ti,
relCP: relCP,
relPC: relPC,
}
if v[0] == '{' {
item1.data, item1.array, err = jsn.Tree(v)
if err != nil {
return err
}
if v1, ok := item1.data["connect"]; ok && (v1[0] == '{' || v1[0] == '[') {
item1._ctype |= (1 << itemConnect)
}
if v1, ok := item1.data["disconnect"]; ok && (v1[0] == '{' || v1[0] == '[') {
item1._ctype |= (1 << itemDisconnect)
}
}
item.items = append(item.items, item1)
id++
}
}
if unionize {
item._type = itemUnion
}
// For inserts, order the children according to the
// creation order required by the parent-to-child
// relationships; for example, users need to be created
// before the products they own.
// For updates, the order defined in the query must be
// the order used.
switch item._type {
case itemInsert:
for _, v := range item.items {
if v.relPC.Type == RelOneToMany {
st.Push(v)
}
}
st.Push(renitem{kvitem: item, array: array, data: data})
for _, v := range item.items {
if v.relPC.Type == RelOneToOne {
st.Push(v)
}
}
case itemUpdate:
for _, v := range item.items {
if !(v._ctype > 0 && v.relPC.Type == RelOneToOne) {
st.Push(v)
}
}
st.Push(renitem{kvitem: item, array: array, data: data})
for _, v := range item.items {
if v._ctype > 0 && v.relPC.Type == RelOneToOne {
st.Push(v)
}
}
case itemUnion:
st.Push(renitem{kvitem: item, array: array, data: data})
for _, v := range item.items {
st.Push(v)
}
default:
for _, v := range item.items {
st.Push(v)
}
st.Push(renitem{kvitem: item, array: array, data: data})
}
return nil
}
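
The push order in handleKVItem is easiest to see with a toy LIFO: whatever is pushed after the parent's renitem pops — and therefore renders — before it, which is how one-to-one children end up created ahead of the row that will reference them, while one-to-many children render after it. A sketch (not the util.Stack API):

package main

import "fmt"

func main() {
	var st []string
	push := func(s string) { st = append(st, s) }
	push("one-to-many child") // rendered last: it needs the parent's id
	push("parent insert")
	push("one-to-one child") // rendered first: the parent needs its id
	for len(st) > 0 {
		fmt.Println(st[len(st)-1])
		st = st[:len(st)-1]
	}
}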
func (c *compilerContext) renderUnionStmt(w io.Writer, item renitem) error {
var connect, disconnect bool
// Render only for a parent-to-child relationship of one-to-many.
if item.relPC.Type != RelOneToMany {
return nil
}
for _, v := range item.items {
if v._type == itemConnect {
connect = true
} else if v._type == itemDisconnect {
disconnect = true
}
if connect && disconnect {
break
}
}
if connect {
io.WriteString(w, `, `)
if connect && disconnect {
renderCteNameWithSuffix(w, item.kvitem, "c")
} else {
quoted(w, item.ti.Name)
}
io.WriteString(w, ` AS ( UPDATE `)
quoted(w, item.ti.Name)
io.WriteString(w, ` SET `)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, ` = `)
// When setting the id of the connected table in a one-to-many setting,
// we always overwrite the value, including for array columns.
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
io.WriteString(w, ` FROM `)
quoted(w, item.relPC.Left.Table)
io.WriteString(w, ` WHERE`)
i := 0
for _, v := range item.items {
if v._type == itemConnect {
if i != 0 {
io.WriteString(w, ` OR (`)
} else {
io.WriteString(w, ` (`)
}
if err := renderWhereFromJSON(w, v, "connect", v.val); err != nil {
return err
}
io.WriteString(w, `)`)
i++
}
}
io.WriteString(w, ` RETURNING `)
quoted(w, item.ti.Name)
io.WriteString(w, `.*)`)
}
if disconnect {
io.WriteString(w, `, `)
if connect && disconnect {
renderCteNameWithSuffix(w, item.kvitem, "d")
} else {
quoted(w, item.ti.Name)
}
io.WriteString(w, ` AS ( UPDATE `)
quoted(w, item.ti.Name)
io.WriteString(w, ` SET `)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, ` = `)
if item.relPC.Right.Array {
io.WriteString(w, ` array_remove(`)
quoted(w, item.relPC.Right.Col)
io.WriteString(w, `, `)
colWithTable(w, item.relPC.Left.Table, item.relPC.Left.Col)
io.WriteString(w, `)`)
} else {
io.WriteString(w, ` NULL`)
}
io.WriteString(w, ` FROM `)
quoted(w, item.relPC.Left.Table)
io.WriteString(w, ` WHERE`)
i := 0
for _, v := range item.items {
if v._type == itemDisconnect {
if i != 0 {
io.WriteString(w, ` OR (`)
} else {
io.WriteString(w, ` (`)
}
if err := renderWhereFromJSON(w, v, "disconnect", v.val); err != nil {
return err
}
io.WriteString(w, `)`)
i++
}
}
io.WriteString(w, ` RETURNING `)
quoted(w, item.ti.Name)
io.WriteString(w, `.*)`)
}
if connect && disconnect {
io.WriteString(w, `, `)
quoted(w, item.ti.Name)
io.WriteString(w, ` AS (`)
io.WriteString(w, `SELECT * FROM `)
renderCteNameWithSuffix(w, item.kvitem, "c")
io.WriteString(w, ` UNION ALL `)
io.WriteString(w, `SELECT * FROM `)
renderCteNameWithSuffix(w, item.kvitem, "d")
io.WriteString(w, `)`)
}
return nil
}
func renderInsertUpdateColumns(w io.Writer,
qc *qcode.QCode,
jt map[string]json.RawMessage,
ti *DBTableInfo,
skipcols map[string]struct{},
values bool) (uint32, error) {
root := &qc.Selects[0]
renderedCol := false
n := 0
for _, cn := range ti.Columns {
if _, ok := skipcols[cn.Name]; ok {
continue
}
if _, ok := jt[cn.Key]; !ok {
continue
}
if _, ok := root.PresetMap[cn.Key]; ok {
continue
}
if len(root.Allowed) != 0 {
if _, ok := root.Allowed[cn]; !ok {
if _, ok := root.Allowed[cn.Key]; !ok {
continue
}
}
if i != 0 {
io.WriteString(c.w, `, `)
if n != 0 {
io.WriteString(w, `, `)
}
io.WriteString(c.w, `"`)
io.WriteString(c.w, cn)
io.WriteString(c.w, `"`)
i++
}
if i != 0 && len(root.PresetList) != 0 {
io.WriteString(c.w, `, `)
if values {
colWithTable(w, "t", cn.Name)
} else {
quoted(w, cn.Name)
}
if !renderedCol {
renderedCol = true
}
n++
}
for i := range root.PresetList {
cn := root.PresetList[i]
col, ok := ti.Columns[cn]
col, ok := ti.ColMap[cn]
if !ok {
continue
}
if i != 0 {
io.WriteString(c.w, `, `)
if _, ok := skipcols[col.Name]; ok {
continue
}
if i != 0 || n != 0 {
io.WriteString(w, `, `)
}
if values {
io.WriteString(c.w, `'`)
io.WriteString(c.w, root.PresetMap[cn])
io.WriteString(c.w, `' :: `)
io.WriteString(c.w, col.Type)
io.WriteString(w, `'`)
io.WriteString(w, root.PresetMap[cn])
io.WriteString(w, `' :: `)
io.WriteString(w, col.Type)
} else {
io.WriteString(c.w, `"`)
io.WriteString(c.w, cn)
io.WriteString(c.w, `"`)
quoted(w, cn)
}
if !renderedCol {
renderedCol = true
}
}
return 0, nil
}
func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
root := &qc.Selects[0]
update, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not defined", qc.ActionVar)
if len(skipcols) != 0 && renderedCol {
io.WriteString(w, `, `)
}
jt, array, err := jsn.Tree(update)
if err != nil {
return 0, err
}
io.WriteString(c.w, `(WITH "input" AS (SELECT {{`)
io.WriteString(c.w, qc.ActionVar)
io.WriteString(c.w, `}}::json AS j) UPDATE `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` SET (`)
c.renderInsertUpdateColumns(qc, w, jt, ti, false)
io.WriteString(c.w, `) = (SELECT `)
c.renderInsertUpdateColumns(qc, w, jt, ti, true)
io.WriteString(c.w, ` FROM input i, `)
if array {
io.WriteString(c.w, `json_populate_recordset`)
} else {
io.WriteString(c.w, `json_populate_record`)
}
io.WriteString(c.w, `(NULL::`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `, i.j) t)`)
io.WriteString(c.w, ` WHERE `)
if err := c.renderWhere(root, ti); err != nil {
return 0, err
}
return 0, nil
}
func (c *compilerContext) renderDelete(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
root := &qc.Selects[0]
io.WriteString(c.w, `(DELETE FROM `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := c.renderWhere(root, ti); err != nil {
return 0, err
}
return 0, nil
}
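// For reference, renderDelete produces the same CTE-wrapped shape the other
// mutations use, so the outer SELECT can read the deleted rows back. A
// hedged sketch for a filter like `where: { id: { eq: 1 } }`:
//
//   WITH "products" AS (
//     DELETE FROM "products"
//     WHERE (("products"."id") IS NOT DISTINCT FROM 1)
//     RETURNING "products".*
//   ) SELECT ...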
@@ -225,7 +446,14 @@ func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
upsert, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("Variable '%s' not defined", qc.ActionVar)
return 0, fmt.Errorf("variable '%s' not defined", qc.ActionVar)
}
if len(upsert) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
if ti.PrimaryCol == nil {
return 0, fmt.Errorf("no primary key column found")
}
jt, _, err := jsn.Tree(upsert)
@@ -240,23 +468,23 @@
io.WriteString(c.w, ` ON CONFLICT (`)
i := 0
for _, cn := range ti.ColumnNames {
if _, ok := jt[cn]; !ok {
for _, cn := range ti.Columns {
if _, ok := jt[cn.Key]; !ok {
continue
}
if col, ok := ti.Columns[cn]; !ok || !(col.UniqueKey || col.PrimaryKey) {
if col, ok := ti.ColMap[cn.Key]; !ok || !(col.UniqueKey || col.PrimaryKey) {
continue
}
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, cn)
io.WriteString(c.w, cn.Name)
i++
}
if i == 0 {
io.WriteString(c.w, ti.PrimaryCol)
io.WriteString(c.w, ti.PrimaryCol.Name)
}
io.WriteString(c.w, `)`)
@@ -271,24 +499,202 @@ func (c *compilerContext) renderUpsert(qc *qcode.QCode, w io.Writer,
io.WriteString(c.w, ` DO UPDATE SET `)
i = 0
for _, cn := range ti.ColumnNames {
if _, ok := jt[cn]; !ok {
for _, cn := range ti.Columns {
if _, ok := jt[cn.Key]; !ok {
continue
}
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, cn)
io.WriteString(c.w, cn.Name)
io.WriteString(c.w, ` = EXCLUDED.`)
io.WriteString(c.w, cn)
io.WriteString(c.w, cn.Name)
i++
}
io.WriteString(c.w, ` RETURNING *) `)
return 0, nil
}
func (c *compilerContext) renderConnectStmt(qc *qcode.QCode, w io.Writer,
item renitem) error {
rel := item.relPC
// Render only for a one-to-one parent-to-child relationship.
// For this to work the child needs to be found first so its primary key
// can be set in the related column on the parent object.
// Eg. Create product and connect a user to it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (SELECT `)
if rel.Left.Array {
io.WriteString(w, `array_agg(DISTINCT `)
quoted(w, rel.Right.Col)
io.WriteString(w, `) AS `)
quoted(w, rel.Right.Col)
} else {
quoted(w, rel.Right.Col)
}
io.WriteString(c.w, ` FROM "_sg_input" i,`)
quoted(c.w, item.ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
return err
}
io.WriteString(c.w, ` LIMIT 1)`)
return nil
}
func (c *compilerContext) renderDisconnectStmt(qc *qcode.QCode, w io.Writer,
item renitem) error {
rel := item.relPC
// Render only for a one-to-one parent-to-child relationship.
// For this to work the child needs to be found first so its
// null value can be set in the related column on the parent object.
// Eg. Update product and disconnect the user from it.
if rel.Type != RelOneToOne {
return nil
}
io.WriteString(w, `, "_x_`)
io.WriteString(c.w, item.ti.Name)
io.WriteString(c.w, `" AS (`)
if rel.Right.Array {
io.WriteString(c.w, `SELECT `)
quoted(w, rel.Right.Col)
io.WriteString(c.w, ` FROM "_sg_input" i,`)
quoted(c.w, item.ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := renderWhereFromJSON(c.w, item.kvitem, "connect", item.kvitem.val); err != nil {
return err
}
io.WriteString(c.w, ` LIMIT 1))`)
} else {
io.WriteString(c.w, `SELECT * FROM (VALUES(NULL::`)
io.WriteString(w, rel.Right.col.Type)
io.WriteString(c.w, `)) AS LOOKUP(`)
quoted(w, rel.Right.Col)
io.WriteString(c.w, `))`)
}
return nil
}
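// The "_x_<table>" CTE above gives the parent statement exactly one row to
// join against: for an array foreign key it looks up the child row's key
// from the input JSON, and for a scalar foreign key it yields a single
// typed NULL, e.g. (hedged sketch with hypothetical names):
//
//   SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("user_id")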
func renderWhereFromJSON(w io.Writer, item kvitem, key string, val []byte) error {
var kv map[string]json.RawMessage
ti := item.ti
if err := json.Unmarshal(val, &kv); err != nil {
return err
}
i := 0
for k, v := range kv {
col, ok := ti.ColMap[k]
if !ok {
continue
}
if i != 0 {
io.WriteString(w, ` AND `)
}
if v[0] == '[' {
colWithTable(w, ti.Name, k)
if col.Array {
io.WriteString(w, ` && `)
} else {
io.WriteString(w, ` = `)
}
io.WriteString(w, `ANY((select a::`)
io.WriteString(w, col.Type)
io.WriteString(w, ` AS list from json_array_elements_text(`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `::json) AS a))`)
} else if col.Array {
io.WriteString(w, `(`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `)::`)
io.WriteString(w, col.Type)
io.WriteString(w, ` = ANY(`)
colWithTable(w, ti.Name, k)
io.WriteString(w, `)`)
} else {
colWithTable(w, ti.Name, k)
io.WriteString(w, `= (`)
renderPathJSON(w, item, key, k)
io.WriteString(w, `)::`)
io.WriteString(w, col.Type)
}
i++
}
return nil
}
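// exampleWhereFromJSON is an illustrative sketch, not compiler code: it
// shows the clause renderWhereFromJSON emits for a hypothetical scalar
// column, e.g. a payload of {"connect": {"id": 5}} on "users" becoming
// "users"."id"= (i.j->'connect'->>'id')::bigint
func exampleWhereFromJSON(w io.Writer) {
colWithTable(w, "users", "id") // writes "users"."id"
io.WriteString(w, `= (`)
io.WriteString(w, `i.j->'connect'->>'id'`) // the JSON path, as renderPathJSON builds it
io.WriteString(w, `)::bigint`) // cast the extracted text to the column type
}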
func renderPathJSON(w io.Writer, item kvitem, key1, key2 string) {
io.WriteString(w, `(i.j->`)
joinPath(w, item.path)
io.WriteString(w, `->'`)
io.WriteString(w, key1)
io.WriteString(w, `'->>'`)
io.WriteString(w, key2)
io.WriteString(w, `')`)
}
func renderCteName(w io.Writer, item kvitem) error {
io.WriteString(w, `"`)
io.WriteString(w, item.ti.Name)
if item._type == itemConnect || item._type == itemDisconnect {
io.WriteString(w, `_`)
int2string(w, item.id)
}
io.WriteString(w, `"`)
return nil
}
func renderCteNameWithSuffix(w io.Writer, item kvitem, suffix string) error {
io.WriteString(w, `"`)
io.WriteString(w, item.ti.Name)
io.WriteString(w, `_`)
io.WriteString(w, suffix)
io.WriteString(w, `"`)
return nil
}
func quoted(w io.Writer, identifier string) {
io.WriteString(w, `"`)
io.WriteString(w, identifier)
io.WriteString(w, `"`)
}
func joinPath(w io.Writer, path []string) {
for i := range path {
if i != 0 {
io.WriteString(w, `->`)
}
io.WriteString(w, `'`)
io.WriteString(w, path[i])
io.WriteString(w, `'`)
}
}
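// exampleJSONPath is illustrative only: it shows how the small writer
// helpers above compose. With path ["posts", "tags"], key1 "connect" and
// key2 "id", the renderPathJSON-style output is:
//
//   (i.j->'posts'->'tags'->'connect'->>'id')
func exampleJSONPath(w io.Writer) {
io.WriteString(w, `(i.j->`)
joinPath(w, []string{"posts", "tags"})
io.WriteString(w, `->'connect'->>'id')`)
}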


@@ -5,77 +5,6 @@ import (
"testing"
)
func simpleInsert(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
}
}`
sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id" FROM "users") AS "users_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func singleInsert(t *testing.T) {
gql := `mutation {
product(id: 15, insert: $insert) {
id
name
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO "products" ("name", "description", "user_id") SELECT "name", "description", "user_id" FROM input i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc", "user_id": 5 }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func bulkInsert(t *testing.T) {
gql := `mutation {
product(name: "test", id: 15, insert: $insert) {
id
name
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO "products" ("name", "description") SELECT "name", "description" FROM input i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"insert": json.RawMessage(` [{ "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }]`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func singleUpsert(t *testing.T) {
gql := `mutation {
product(upsert: $upsert) {
@@ -84,10 +13,10 @@ func singleUpsert(t *testing.T) {
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{upsert}}::json AS j) INSERT INTO "products" ("name", "description") SELECT "name", "description" FROM input i, json_populate_record(NULL::products, i.j) t ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
@@ -108,10 +37,10 @@ func singleUpsertWhere(t *testing.T) {
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{upsert}}::json AS j) INSERT INTO "products" ("name", "description") SELECT "name", "description" FROM input i, json_populate_record(NULL::products, i.j) t ON CONFLICT (id) WHERE (("products"."price") > 3) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) WHERE (("products"."price") > 3) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
"upsert": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
@@ -132,10 +61,10 @@ func bulkUpsert(t *testing.T) {
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{upsert}}::json AS j) INSERT INTO "products" ("name", "description") SELECT "name", "description" FROM input i, json_populate_recordset(NULL::products, i.j) t ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
sql := `WITH "_sg_input" AS (SELECT '{{upsert}}' :: json AS j), "products" AS (INSERT INTO "products" ("name", "description") SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_recordset(NULL::products, i.j) t RETURNING *) ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name, description = EXCLUDED.description RETURNING *) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"upsert": json.RawMessage(` [{ "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }]`),
"upsert": json.RawMessage(` [{ "name": "my_name", "description": "my_desc" }]`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
@@ -148,30 +77,6 @@ func bulkUpsert(t *testing.T) {
}
}
func singleUpdate(t *testing.T) {
gql := `mutation {
product(id: 15, update: $update, where: { id: { eq: 1 } }) {
id
name
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{update}}::json AS j) UPDATE "products" SET ("name", "description") = (SELECT "name", "description" FROM input i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 1) AND (("products"."id") = 15) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func delete(t *testing.T) {
gql := `mutation {
product(delete: true, where: { id: { eq: 1 } }) {
@@ -180,10 +85,10 @@ func delete(t *testing.T) {
}
}`
sql := `WITH "products" AS (DELETE FROM "products" WHERE (("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 1) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337"`
sql := `WITH "products" AS (DELETE FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8)) AND (("products"."id") IS NOT DISTINCT FROM 1)) RETURNING "products".*)SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
@@ -196,111 +101,59 @@ func delete(t *testing.T) {
}
}
func blockedInsert(t *testing.T) {
gql := `mutation {
user(insert: $data) {
id
}
}`
// func blockedInsert(t *testing.T) {
// gql := `mutation {
// user(insert: $data) {
// id
// }
// }`
sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id" FROM "users") AS "users_0") AS "done_1337"`
// sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) INSERT INTO "users" ("full_name", "email") SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
}
// vars := map[string]json.RawMessage{
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
// }
resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
if err != nil {
t.Fatal(err)
}
// resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
// if err != nil {
// t.Fatal(err)
// }
// fmt.Println(string(resSQL))
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
// if string(resSQL) != sql {
// t.Fatal(errNotExpected)
// }
// }
func blockedUpdate(t *testing.T) {
gql := `mutation {
user(where: { id: { lt: 5 } }, update: $data) {
id
email
}
}`
// func blockedUpdate(t *testing.T) {
// gql := `mutation {
// user(where: { id: { lt: 5 } }, update: $data) {
// id
// email
// }
// }`
sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id", "users"."email" FROM "users") AS "users_0") AS "done_1337"`
// sql := `WITH "users" AS (WITH "input" AS (SELECT '{{data}}' :: json AS j) UPDATE "users" SET ("full_name", "email") = (SELECT "full_name", "email" FROM input i, json_populate_record(NULL::users, i.j) t) WHERE false RETURNING *) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
}
// vars := map[string]json.RawMessage{
// "data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
// }
resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
if err != nil {
t.Fatal(err)
}
// resSQL, err := compileGQLToPSQL(gql, vars, "bad_dude")
// if err != nil {
// t.Fatal(err)
// }
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func simpleInsertWithPresets(t *testing.T) {
gql := `mutation {
product(insert: $data) {
id
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO "products" ("name", "price", "created_at", "updated_at", "user_id") SELECT "name", "price", 'now' :: timestamp without time zone, 'now' :: timestamp without time zone, '$user_id' :: bigint FROM input i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id" FROM "products") AS "products_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Tomato", "price": 5.76}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func simpleUpdateWithPresets(t *testing.T) {
gql := `mutation {
product(update: $data) {
id
}
}`
sql := `WITH "products" AS (WITH "input" AS (SELECT {{data}}::json AS j) UPDATE "products" SET ("name", "price", "updated_at") = (SELECT "name", "price", 'now' :: timestamp without time zone FROM input i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") = {{user_id}}) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id" FROM "products") AS "products_0") AS "done_1337"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Apple", "price": 1.25}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
// if string(resSQL) != sql {
// t.Fatal(errNotExpected)
// }
// }
func TestCompileMutate(t *testing.T) {
t.Run("simpleInsert", simpleInsert)
t.Run("singleInsert", singleInsert)
t.Run("bulkInsert", bulkInsert)
t.Run("singleUpdate", singleUpdate)
t.Run("singleUpsert", singleUpsert)
t.Run("singleUpsertWhere", singleUpsertWhere)
t.Run("bulkUpsert", bulkUpsert)
t.Run("delete", delete)
t.Run("blockedInsert", blockedInsert)
t.Run("blockedUpdate", blockedUpdate)
t.Run("simpleInsertWithPresets", simpleInsertWithPresets)
t.Run("simpleUpdateWithPresets", simpleUpdateWithPresets)
// t.Run("blockedInsert", blockedInsert)
// t.Run("blockedUpdate", blockedUpdate)
}


@@ -27,8 +27,11 @@ func TestMain(m *testing.M) {
"token",
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("user", "product", qcode.TRConfig{
err = qcompile.AddRole("user", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price", "users", "customers"},
Filters: []string{
@@ -54,27 +57,39 @@ func TestMain(m *testing.M) {
},
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("anon", "product", qcode.TRConfig{
err = qcompile.AddRole("anon", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name"},
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("anon1", "product", qcode.TRConfig{
err = qcompile.AddRole("anon1", "product", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "name", "price"},
DisableFunctions: true,
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("user", "users", qcode.TRConfig{
err = qcompile.AddRole("user", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar", "email", "products"},
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
err = qcompile.AddRole("bad_dude", "users", qcode.TRConfig{
Query: qcode.QueryConfig{
Filters: []string{"false"},
DisableFunctions: true,
@@ -86,8 +101,11 @@ func TestMain(m *testing.M) {
Filters: []string{"false"},
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("user", "mes", qcode.TRConfig{
err = qcompile.AddRole("user", "mes", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "full_name", "avatar"},
Filters: []string{
@@ -95,8 +113,11 @@ func TestMain(m *testing.M) {
},
},
})
if err != nil {
log.Fatal(err)
}
qcompile.AddRole("user", "customers", qcode.TRConfig{
err = qcompile.AddRole("user", "customers", qcode.TRConfig{
Query: qcode.QueryConfig{
Columns: []string{"id", "email", "full_name", "products"},
},
@@ -106,72 +127,10 @@ func TestMain(m *testing.M) {
log.Fatal(err)
}
tables := []*DBTable{
&DBTable{Name: "customers", Type: "table"},
&DBTable{Name: "users", Type: "table"},
&DBTable{Name: "products", Type: "table"},
&DBTable{Name: "purchases", Type: "table"},
}
columns := [][]*DBColumn{
[]*DBColumn{
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
[]*DBColumn{
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
[]*DBColumn{
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
&DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
[]*DBColumn{
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
&DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
&DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
&DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
}
schema := &DBSchema{
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
al: make(map[string]struct{}),
}
aliases := map[string][]string{
"users": []string{"mes"},
}
for i, t := range tables {
schema.updateSchema(t, columns[i], aliases)
}
schema := getTestSchema()
vars := NewVariables(map[string]string{
"account_id": "select account_id from users where id = $user_id",
"admin_account_id": "5",
})
pcompile = NewCompiler(Config{


@@ -1,3 +1,4 @@
//nolint:errcheck
package psql
import (
@@ -6,16 +7,13 @@ import (
"errors"
"fmt"
"io"
"math"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
)
const (
empty = ""
closeBlock = 500
)
@@ -39,13 +37,17 @@ func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
return c.schema.SetRel(child, parent, rel)
}
func (c *Compiler) IDColumn(table string) (string, error) {
t, err := c.schema.GetTable(table)
func (c *Compiler) IDColumn(table string) (*DBColumn, error) {
ti, err := c.schema.GetTable(table)
if err != nil {
return empty, err
return nil, err
}
return t.PrimaryCol, nil
if ti.PrimaryCol == nil {
return nil, fmt.Errorf("no primary key column found")
}
return ti.PrimaryCol, nil
}
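// Callers of IDColumn now get the full column metadata rather than just a
// name. A hedged usage sketch (the compiler instance here is hypothetical):
//
//	col, err := pcompile.IDColumn("products")
//	if err != nil {
//		log.Fatal(err)
//	}
//	quoted(w, col.Name) // col.Type is also available for casts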
type compilerContext struct {
@@ -77,30 +79,63 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
}
c := &compilerContext{w, qc.Selects, co}
multiRoot := (len(qc.Roots) > 1)
st := NewIntStack()
si := 0
if multiRoot {
io.WriteString(c.w, `SELECT row_to_json("json_root") FROM (SELECT `)
for _, id := range qc.Roots {
root := qc.Selects[id]
if root.SkipRender {
continue
}
st.Push(root.ID + closeBlock)
st.Push(root.ID)
if si != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, `"sel_`)
int2string(c.w, root.ID)
io.WriteString(c.w, `"."json_`)
int2string(c.w, root.ID)
io.WriteString(c.w, `"`)
alias(c.w, root.FieldName)
si++
}
if si != 0 {
io.WriteString(c.w, ` FROM `)
}
} else {
root := qc.Selects[0]
if !root.SkipRender {
io.WriteString(c.w, `SELECT json_object_agg(`)
io.WriteString(c.w, `'`)
io.WriteString(c.w, root.FieldName)
io.WriteString(c.w, `', `)
io.WriteString(c.w, `json_`)
int2string(c.w, root.ID)
st.Push(root.ID + closeBlock)
st.Push(root.ID)
io.WriteString(c.w, `) FROM `)
si++
}
}
if si == 0 {
return 0, errors.New("all tables skipped. cannot render query")
}
var ignored uint32
@@ -114,16 +149,21 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
if id < closeBlock {
sel := &c.s[id]
ti, err := c.schema.GetTable(sel.Table)
if sel.ParentID == -1 {
io.WriteString(c.w, `(`)
}
ti, err := c.schema.GetTable(sel.Name)
if err != nil {
return 0, err
}
if sel.ID != 0 {
if sel.ParentID != -1 {
if err = c.renderLateralJoin(sel); err != nil {
return 0, err
}
}
skipped, err := c.renderSelect(sel, ti)
if err != nil {
return 0, err
@@ -135,6 +175,9 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
continue
}
child := &c.s[cid]
if child.SkipRender {
continue
}
st.Push(child.ID + closeBlock)
st.Push(child.ID)
@@ -143,7 +186,7 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
} else {
sel := &c.s[(id - closeBlock)]
ti, err := c.schema.GetTable(sel.Table)
ti, err := c.schema.GetTable(sel.Name)
if err != nil {
return 0, err
}
@@ -153,21 +196,35 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer) (uint32, error) {
return 0, err
}
if sel.ID != 0 {
if sel.ParentID != -1 {
if err = c.renderLateralJoinClose(sel); err != nil {
return 0, err
}
} else {
io.WriteString(c.w, `)`)
aliasWithID(c.w, `sel`, sel.ID)
if st.Len() != 0 {
io.WriteString(c.w, `, `)
}
}
if len(sel.Args) != 0 {
for _, v := range sel.Args {
qcode.FreeNode(v)
}
}
}
}
io.WriteString(c.w, `)`)
alias(c.w, `done_1337`)
if multiRoot {
io.WriteString(c.w, `) AS "json_root"`)
}
return ignored, nil
}
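// walkExample is illustrative only (it assumes the IntStack Push/Pop/Len
// API used above): it shows the traversal pattern compileQuery relies on.
// Pushing id+closeBlock before id means the "open" pops first, children
// expand in between, and the matching "close" pops only after the whole
// subtree has been rendered.
func walkExample(children map[int32][]int32) {
st := NewIntStack()
st.Push(0 + closeBlock)
st.Push(0)
for st.Len() != 0 {
id := st.Pop()
if id < closeBlock {
fmt.Printf("open %d\n", id) // render the select for this ID
for _, cid := range children[id] {
st.Push(cid + closeBlock)
st.Push(cid)
}
} else {
fmt.Printf("close %d\n", id-closeBlock) // close it after its children
}
}
}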
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column) {
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column, error) {
var skipped uint32
cols := make([]*qcode.Column, 0, len(sel.Cols))
@@ -177,62 +234,91 @@ func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (u
colmap[sel.Cols[i].Name] = struct{}{}
}
for i := range sel.OrderBy {
colmap[sel.OrderBy[i].Col] = struct{}{}
}
for _, id := range sel.Children {
child := &c.s[id]
rel, err := c.schema.GetRel(child.Table, ti.Name)
rel, err := c.schema.GetRel(child.Name, ti.Name)
if err != nil {
skipped |= (1 << uint(id))
continue
return 0, nil, err
//skipped |= (1 << uint(id))
//continue
}
switch rel.Type {
case RelOneToMany:
fallthrough
case RelBelongTo:
if _, ok := colmap[rel.Col2]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Col2, FieldName: rel.Col2})
case RelOneToOne, RelOneToMany:
if _, ok := colmap[rel.Right.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Right.Col, FieldName: rel.Right.Col})
colmap[rel.Right.Col] = struct{}{}
}
case RelOneToManyThrough:
if _, ok := colmap[rel.Col1]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Col1, FieldName: rel.Col1})
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
case RelEmbedded:
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Left.Col})
colmap[rel.Left.Col] = struct{}{}
}
case RelRemote:
if _, ok := colmap[rel.Col1]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Col1, FieldName: rel.Col2})
if _, ok := colmap[rel.Left.Col]; !ok {
cols = append(cols, &qcode.Column{Table: ti.Name, Name: rel.Left.Col, FieldName: rel.Right.Col})
colmap[rel.Left.Col] = struct{}{}
skipped |= (1 << uint(id))
}
skipped |= (1 << uint(id))
default:
skipped |= (1 << uint(id))
return 0, nil, fmt.Errorf("unknown relationship %s", rel)
//skipped |= (1 << uint(id))
}
}
return skipped, cols
return skipped, cols, nil
}
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint32, error) {
skipped, childCols := c.processChildren(sel, ti)
var rel *DBRel
var err error
if sel.ParentID != -1 {
parent := c.s[sel.ParentID]
rel, err = c.schema.GetRel(ti.Name, parent.Name)
if err != nil {
return 0, err
}
}
skipped, childCols, err := c.processChildren(sel, ti)
if err != nil {
return 0, err
}
hasOrder := len(sel.OrderBy) != 0
// SELECT
if ti.Singular == false {
//fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, c.sel.Table)
if !ti.Singular {
//fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, c.sel.Name)
io.WriteString(c.w, `SELECT coalesce(json_agg("`)
io.WriteString(c.w, "sel_json_")
io.WriteString(c.w, "json_")
int2string(c.w, sel.ID)
io.WriteString(c.w, `"`)
if hasOrder {
err := c.renderOrderBy(sel, ti)
if err != nil {
return skipped, err
if err := c.renderOrderBy(sel, ti); err != nil {
return 0, err
}
}
//fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, c.sel.Table)
//fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, c.sel.Name)
io.WriteString(c.w, `), '[]')`)
alias(c.w, sel.Table)
aliasWithID(c.w, "json", sel.ID)
io.WriteString(c.w, ` FROM (`)
}
@@ -245,8 +331,8 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
io.WriteString(c.w, `row_to_json((`)
//fmt.Fprintf(w, `SELECT "sel_%d" FROM (SELECT `, c.sel.ID)
io.WriteString(c.w, `SELECT "sel_`)
//fmt.Fprintf(w, `SELECT "%d" FROM (SELECT `, c.sel.ID)
io.WriteString(c.w, `SELECT "json_row_`)
int2string(c.w, sel.ID)
io.WriteString(c.w, `" FROM (SELECT `)
@@ -255,18 +341,17 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
c.renderRemoteRelColumns(sel, ti)
err := c.renderJoinedColumns(sel, ti, skipped)
if err != nil {
if err = c.renderJoinedColumns(sel, ti, skipped); err != nil {
return skipped, err
}
//fmt.Fprintf(w, `) AS "sel_%d"`, c.sel.ID)
//fmt.Fprintf(w, `) AS "%d"`, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, "sel", sel.ID)
aliasWithID(c.w, "json_row", sel.ID)
//fmt.Fprintf(w, `)) AS "%s"`, c.sel.Table)
//fmt.Fprintf(w, `)) AS "%s"`, c.sel.Name)
io.WriteString(c.w, `))`)
aliasWithID(c.w, "sel_json", sel.ID)
aliasWithID(c.w, "json", sel.ID)
// END-ROW-TO-JSON
if hasOrder {
@@ -275,7 +360,7 @@ func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint
// END-SELECT
// FROM (SELECT .... )
err = c.renderBaseSelect(sel, ti, childCols, skipped)
err = c.renderBaseSelect(sel, ti, rel, childCols, skipped)
if err != nil {
return skipped, err
}
@@ -295,8 +380,8 @@ func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo)
}
switch {
case sel.Paging.NoLimit:
break
case ti.Singular:
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
case len(sel.Paging.Limit) != 0:
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
@@ -304,8 +389,8 @@ func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo)
io.WriteString(c.w, sel.Paging.Limit)
io.WriteString(c.w, `') :: integer`)
case ti.Singular:
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
case sel.Paging.NoLimit:
break
default:
io.WriteString(c.w, ` LIMIT ('20') :: integer`)
@@ -318,10 +403,10 @@ func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo)
io.WriteString(c.w, `') :: integer`)
}
if ti.Singular == false {
//fmt.Fprintf(w, `) AS "sel_json_agg_%d"`, c.sel.ID)
if !ti.Singular {
//fmt.Fprintf(w, `) AS "json_agg_%d"`, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, "sel_json_agg", sel.ID)
aliasWithID(c.w, "json_agg", sel.ID)
}
return nil
@@ -333,16 +418,16 @@ func (c *compilerContext) renderLateralJoin(sel *qcode.Select) error {
}
func (c *compilerContext) renderLateralJoinClose(sel *qcode.Select) error {
//fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, c.sel.Table, c.sel.ID)
//fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, c.sel.Name, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithIDSuffix(c.w, sel.Table, sel.ID, "_join")
aliasWithIDSuffix(c.w, sel.Name, sel.ID, "_join")
io.WriteString(c.w, ` ON ('true')`)
return nil
}
func (c *compilerContext) renderJoin(sel *qcode.Select, ti *DBTableInfo) error {
parent := &c.s[sel.ParentID]
return c.renderJoinByName(ti.Name, parent.Table, parent.ID)
return c.renderJoinByName(ti.Name, parent.Name, parent.ID)
}
func (c *compilerContext) renderJoinByName(table, parent string, id int32) error {
@@ -363,17 +448,13 @@ func (c *compilerContext) renderJoinByName(table, parent string, id int32) error
}
//fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
//rel.Through, rel.Through, rel.ColT, c.parent.Table, c.parent.ID, rel.Col1)
//rel.Through, rel.Through, rel.ColT, c.parent.Name, c.parent.ID, rel.Left.Col)
io.WriteString(c.w, ` LEFT OUTER JOIN "`)
io.WriteString(c.w, rel.Through)
io.WriteString(c.w, `" ON ((`)
colWithTable(c.w, rel.Through, rel.ColT)
io.WriteString(c.w, `) = (`)
if id != -1 {
colWithTableID(c.w, pt.Name, id, rel.Col1)
} else {
colWithTable(c.w, pt.Name, rel.Col1)
}
colWithTableID(c.w, pt.Name, id, rel.Left.Col)
io.WriteString(c.w, `))`)
return nil
@@ -384,7 +465,7 @@ func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) {
for _, col := range sel.Cols {
n := funcPrefixLen(col.Name)
if n != 0 {
if sel.Functions == false {
if !sel.Functions {
continue
}
if len(sel.Allowed) != 0 {
@@ -404,7 +485,7 @@ func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) {
io.WriteString(c.w, ", ")
}
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
//c.sel.Table, c.sel.ID, col.Name, col.FieldName)
//c.sel.Name, c.sel.ID, col.Name, col.FieldName)
colWithTableIDAlias(c.w, ti.Name, sel.ID, col.Name, col.FieldName)
i++
}
@@ -416,7 +497,7 @@ func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableI
for _, id := range sel.Children {
child := &c.s[id]
rel, err := c.schema.GetRel(child.Table, sel.Table)
rel, err := c.schema.GetRel(child.Name, sel.Name)
if err != nil || rel.Type != RelRemote {
continue
}
@@ -424,101 +505,124 @@ func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableI
io.WriteString(c.w, ", ")
}
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
//c.sel.Table, c.sel.ID, rel.Col1, rel.Col2)
colWithTableID(c.w, ti.Name, sel.ID, rel.Col1)
alias(c.w, rel.Col2)
//c.sel.Name, c.sel.ID, rel.Left.Col, rel.Right.Col)
colWithTableID(c.w, ti.Name, sel.ID, rel.Left.Col)
alias(c.w, rel.Right.Col)
i++
}
}
func (c *compilerContext) renderJoinedColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
colsRendered := len(sel.Cols) != 0
// columns previously rendered
i := len(sel.Cols)
for _, id := range sel.Children {
skipThis := hasBit(skipped, uint32(id))
if colsRendered && !skipThis {
io.WriteString(c.w, ", ")
}
if skipThis {
if hasBit(skipped, uint32(id)) {
continue
}
childSel := &c.s[id]
if childSel.SkipRender {
continue
}
cti, err := c.schema.GetTable(childSel.Table)
if err != nil {
return err
if i != 0 {
io.WriteString(c.w, ", ")
}
//fmt.Fprintf(w, `"%s_%d_join"."%s" AS "%s"`,
//s.Table, s.ID, s.Table, s.FieldName)
if cti.Singular {
io.WriteString(c.w, `"sel_json_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `" AS "`)
io.WriteString(c.w, childSel.FieldName)
io.WriteString(c.w, `"`)
} else {
colWithTableIDSuffixAlias(c.w, childSel.Table, childSel.ID,
"_join", childSel.Table, childSel.FieldName)
}
//s.Name, s.ID, s.Name, s.FieldName)
//if cti.Singular {
io.WriteString(c.w, `"`)
io.WriteString(c.w, childSel.Name)
io.WriteString(c.w, `_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `_join"."json_`)
int2string(c.w, childSel.ID)
io.WriteString(c.w, `" AS "`)
io.WriteString(c.w, childSel.FieldName)
io.WriteString(c.w, `"`)
i++
}
return nil
}
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo, rel *DBRel,
childCols []*qcode.Column, skipped uint32) error {
var groupBy []int
isRoot := sel.ID == 0
isFil := sel.Where != nil
isRoot := (rel == nil)
isFil := (sel.Where != nil && sel.Where.Op != qcode.OpNop)
isSearch := sel.Args["search"] != nil
isAgg := false
colmap := make(map[string]struct{}, (len(sel.Cols) + len(sel.OrderBy)))
io.WriteString(c.w, ` FROM (SELECT `)
i := 0
for n, col := range sel.Cols {
cn := col.Name
colmap[cn] = struct{}{}
_, isRealCol := ti.Columns[cn]
_, isRealCol := ti.ColMap[cn]
if !isRealCol {
if isSearch {
switch {
case cn == "search_rank":
cn = ti.TSVCol
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn]; !ok {
continue
}
}
if ti.TSVCol == nil {
return errors.New("no ts_vector column found")
}
cn = ti.TSVCol.Name
arg := sel.Args["search"]
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `ts_rank("%s"."%s", to_tsquery('%s')) AS %s`,
//c.sel.Table, cn, arg.Val, col.Name)
//fmt.Fprintf(w, `ts_rank("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_rank(`)
colWithTable(c.w, ti.Name, cn)
io.WriteString(c.w, `, to_tsquery('`)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('`)
} else {
io.WriteString(c.w, `, to_tsquery('`)
}
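// websearch_to_tsquery() is only available on PostgreSQL 11 and later
// (version 110000), hence the gate above; unlike to_tsquery() it accepts
// free-form, web-search-style input (quoted phrases, OR, -negation)
// without erroring on syntax.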
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `')`)
io.WriteString(c.w, `'))`)
alias(c.w, col.Name)
i++
case strings.HasPrefix(cn, "search_headline_"):
cn = cn[16:]
cn1 := cn[16:]
if len(sel.Allowed) != 0 {
if _, ok := sel.Allowed[cn1]; !ok {
continue
}
}
arg := sel.Args["search"]
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `ts_headline("%s"."%s", to_tsquery('%s')) AS %s`,
//c.sel.Table, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_headlinek(`)
colWithTable(c.w, ti.Name, cn)
io.WriteString(c.w, `, to_tsquery('`)
//fmt.Fprintf(w, `ts_headline("%s"."%s", websearch_to_tsquery('%s')) AS %s`,
//c.sel.Name, cn, arg.Val, col.Name)
io.WriteString(c.w, `ts_headline(`)
colWithTable(c.w, ti.Name, cn1)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `, websearch_to_tsquery('`)
} else {
io.WriteString(c.w, `, to_tsquery('`)
}
io.WriteString(c.w, arg.Val)
io.WriteString(c.w, `')`)
io.WriteString(c.w, `'))`)
alias(c.w, col.Name)
i++
@@ -549,7 +653,7 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
fn := cn[0 : pl-1]
isAgg = true
//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Table, cn, col.Name)
//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Name, cn, col.Name)
io.WriteString(c.w, fn)
io.WriteString(c.w, `(`)
colWithTable(c.w, ti.Name, cn1)
@@ -561,7 +665,7 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
}
} else {
groupBy = append(groupBy, n)
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, cn)
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, cn)
if i != 0 {
io.WriteString(c.w, `, `)
}
@@ -571,7 +675,23 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
}
}
for _, ob := range sel.OrderBy {
if _, ok := colmap[ob.Col]; ok {
continue
}
colmap[ob.Col] = struct{}{}
if i != 0 {
io.WriteString(c.w, `, `)
}
colWithTable(c.w, ti.Name, ob.Col)
i++
}
for _, col := range childCols {
if _, ok := colmap[col.Name]; ok {
continue
}
if i != 0 {
io.WriteString(c.w, `, `)
}
@@ -583,18 +703,15 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, ` FROM `)
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
c.renderFrom(sel, ti, rel)
// if tn, ok := c.tmap[sel.Table]; ok {
// //fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Table)
// tableWithAlias(c.w, ti.Name, sel.Table)
// if tn, ok := c.tmap[sel.Name]; ok {
// //fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Name)
// tableWithAlias(c.w, ti.Name, sel.Name)
// } else {
// //fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
// //fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
// io.WriteString(c.w, `"`)
// io.WriteString(c.w, sel.Table)
// io.WriteString(c.w, sel.Name)
// io.WriteString(c.w, `"`)
// }
@@ -612,11 +729,9 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
}
io.WriteString(c.w, ` WHERE (`)
if err := c.renderRelationship(sel, ti); err != nil {
return err
}
if isFil {
io.WriteString(c.w, ` AND `)
if err := c.renderWhere(sel, ti); err != nil {
@@ -634,15 +749,15 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, c.sel.Cols[id].Name)
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Name, c.sel.Cols[id].Name)
colWithTable(c.w, ti.Name, sel.Cols[id].Name)
}
}
}
switch {
case sel.Paging.NoLimit:
break
case ti.Singular:
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
case len(sel.Paging.Limit) != 0:
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
@@ -650,8 +765,8 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, sel.Paging.Limit)
io.WriteString(c.w, `') :: integer`)
case ti.Singular:
io.WriteString(c.w, ` LIMIT ('1') :: integer`)
case sel.Paging.NoLimit:
break
default:
io.WriteString(c.w, ` LIMIT ('20') :: integer`)
@@ -664,34 +779,77 @@ func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
io.WriteString(c.w, `') :: integer`)
}
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Table, c.sel.ID)
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Name, c.sel.ID)
io.WriteString(c.w, `)`)
aliasWithID(c.w, ti.Name, sel.ID)
return nil
}
func (c *compilerContext) renderFrom(sel *qcode.Select, ti *DBTableInfo, rel *DBRel) error {
if rel != nil && rel.Type == RelEmbedded {
// json_to_recordset('[{"a":1,"b":[1,2,3],"c":"bar"}, {"a":2,"b":[1,2,3],"c":"bar"}]') as x(a int, b text, d text);
io.WriteString(c.w, `"`)
io.WriteString(c.w, rel.Left.Table)
io.WriteString(c.w, `", `)
io.WriteString(c.w, ti.Type)
io.WriteString(c.w, `_to_recordset(`)
colWithTable(c.w, rel.Left.Table, rel.Right.Col)
io.WriteString(c.w, `) AS `)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, `(`)
for i, col := range ti.Columns {
if i != 0 {
io.WriteString(c.w, `, `)
}
io.WriteString(c.w, col.Name)
io.WriteString(c.w, ` `)
io.WriteString(c.w, col.Type)
}
io.WriteString(c.w, `)`)
} else {
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Name)
io.WriteString(c.w, `"`)
io.WriteString(c.w, ti.Name)
io.WriteString(c.w, `"`)
}
return nil
}
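// So for a JSON/JSONB column treated as a table, the FROM clause expands
// the column into rows with an explicit column-definition list. A hedged
// sketch with hypothetical names:
//
//   FROM "posts", json_to_recordset("posts"."tags") AS "tags"(id bigint, name text)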
func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
colsRendered := len(sel.Cols) != 0
//colsRendered := len(sel.Cols) != 0
for i := range sel.OrderBy {
if colsRendered {
//io.WriteString(w, ", ")
io.WriteString(c.w, `, `)
}
//io.WriteString(w, ", ")
io.WriteString(c.w, `, `)
col := sel.OrderBy[i].Col
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d_%s_ob"`,
//c.sel.Table, c.sel.ID, c,
//c.sel.Table, c.sel.ID, c)
//c.sel.Name, c.sel.ID, c,
//c.sel.Name, c.sel.ID, c)
colWithTableID(c.w, ti.Name, sel.ID, col)
io.WriteString(c.w, ` AS `)
tableIDColSuffix(c.w, sel.Table, sel.ID, col, "_ob")
tableIDColSuffix(c.w, sel.Name, sel.ID, col, "_ob")
}
}
func (c *compilerContext) renderRelationship(sel *qcode.Select, ti *DBTableInfo) error {
parent := c.s[sel.ParentID]
return c.renderRelationshipByName(ti.Name, parent.Table, parent.ID)
pti, err := c.schema.GetTable(parent.Name)
if err != nil {
return err
}
return c.renderRelationshipByName(ti.Name, pti.Name, parent.ID)
}
func (c *compilerContext) renderRelationshipByName(table, parent string, id int32) error {
@@ -700,53 +858,74 @@ func (c *compilerContext) renderRelationshipByName(table, parent string, id int3
return err
}
switch rel.Type {
case RelBelongTo:
//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
io.WriteString(c.w, `((`)
colWithTable(c.w, table, rel.Col1)
io.WriteString(c.w, `) = (`)
if id != -1 {
colWithTableID(c.w, parent, id, rel.Col2)
} else {
colWithTable(c.w, parent, rel.Col2)
}
io.WriteString(c.w, `))`)
io.WriteString(c.w, `((`)
switch rel.Type {
case RelOneToOne, RelOneToMany:
case RelOneToMany:
//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
io.WriteString(c.w, `((`)
colWithTable(c.w, table, rel.Col1)
io.WriteString(c.w, `) = (`)
if id != -1 {
colWithTableID(c.w, parent, id, rel.Col2)
} else {
colWithTable(c.w, parent, rel.Col2)
//c.sel.Name, rel.Left.Col, c.parent.Name, c.parent.ID, rel.Right.Col)
switch {
case !rel.Left.Array && rel.Right.Array:
colWithTable(c.w, table, rel.Left.Col)
io.WriteString(c.w, `) = any (`)
colWithTableID(c.w, parent, id, rel.Right.Col)
case rel.Left.Array && !rel.Right.Array:
colWithTableID(c.w, parent, id, rel.Right.Col)
io.WriteString(c.w, `) = any (`)
colWithTable(c.w, table, rel.Left.Col)
default:
colWithTable(c.w, table, rel.Left.Col)
io.WriteString(c.w, `) = (`)
colWithTableID(c.w, parent, id, rel.Right.Col)
}
io.WriteString(c.w, `))`)
case RelOneToManyThrough:
// This requires the through table to be joined onto this select
//fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
//c.sel.Table, rel.Col1, rel.Through, rel.Col2)
io.WriteString(c.w, `((`)
colWithTable(c.w, table, rel.Col1)
//c.sel.Name, rel.Left.Col, rel.Through, rel.Right.Col)
switch {
case !rel.Left.Array && rel.Right.Array:
colWithTable(c.w, table, rel.Left.Col)
io.WriteString(c.w, `) = any (`)
colWithTable(c.w, rel.Through, rel.Right.Col)
case rel.Left.Array && !rel.Right.Array:
colWithTable(c.w, rel.Through, rel.Right.Col)
io.WriteString(c.w, `) = any (`)
colWithTable(c.w, table, rel.Left.Col)
default:
colWithTable(c.w, table, rel.Left.Col)
io.WriteString(c.w, `) = (`)
colWithTable(c.w, rel.Through, rel.Right.Col)
}
case RelEmbedded:
colWithTable(c.w, rel.Left.Table, rel.Left.Col)
io.WriteString(c.w, `) = (`)
colWithTable(c.w, rel.Through, rel.Col2)
io.WriteString(c.w, `))`)
colWithTableID(c.w, parent, id, rel.Left.Col)
}
io.WriteString(c.w, `))`)
return nil
}
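// Depending on which side of the relationship holds the array column, the
// switch above emits one of three join-condition shapes (hedged sketch):
//
//   (("child"."col") = ("parent_0"."col"))      -- scalar = scalar
//   (("child"."col") = any ("parent_0"."col"))  -- scalar matched into an array
//   (("parent_0"."col") = any ("child"."col"))  -- array probed with a scalar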
func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error {
st := util.NewStack()
if sel.Where != nil {
st.Push(sel.Where)
return c.renderExp(sel.Where, ti, false)
}
return nil
}
func (c *compilerContext) renderExp(ex *qcode.Exp, ti *DBTableInfo, skipNested bool) error {
st := util.NewStack()
st.Push(ex)
for {
if st.Len() == 0 {
@@ -756,6 +935,14 @@ func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error
intf := st.Pop()
switch val := intf.(type) {
case int32:
switch val {
case '(':
io.WriteString(c.w, `(`)
case ')':
io.WriteString(c.w, `)`)
}
case qcode.ExpOp:
switch val {
case qcode.OpAnd:
@@ -777,12 +964,14 @@ func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error
qcode.FreeExp(val)
case qcode.OpAnd, qcode.OpOr:
st.Push(')')
for i := len(val.Children) - 1; i >= 0; i-- {
st.Push(val.Children[i])
if i > 0 {
st.Push(val.Op)
}
}
st.Push('(')
qcode.FreeExp(val)
case qcode.OpNot:
@@ -791,16 +980,16 @@ func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error
qcode.FreeExp(val)
default:
if len(val.NestedCols) != 0 {
if !skipNested && len(val.NestedCols) != 0 {
io.WriteString(c.w, `EXISTS `)
if err := c.renderNestedWhere(val, sel, ti); err != nil {
if err := c.renderNestedWhere(val, ti); err != nil {
return err
}
} else {
//fmt.Fprintf(w, `(("%s"."%s") `, c.sel.Table, val.Col)
if err := c.renderOp(val, sel, ti); err != nil {
//fmt.Fprintf(w, `(("%s"."%s") `, c.sel.Name, val.Col)
if err := c.renderOp(val, ti); err != nil {
return err
}
qcode.FreeExp(val)
@@ -816,8 +1005,7 @@ func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error
return nil
}
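// exampleInfix is illustrative only: it mirrors how renderExp linearizes an
// AND/OR node without recursion. Pushing ')' first, then the children
// interleaved with the operator in reverse order, then '(', makes the
// tokens pop back out in infix order.
func exampleInfix(w io.Writer) {
type tok struct {
lit  string   // literal token to emit, or
kids []string // a node whose children are joined by AND
}
st := []tok{{kids: []string{`(("a") = (1))`, `(("b") = (2))`}}}
for len(st) != 0 {
t := st[len(st)-1]
st = st[:len(st)-1]
if t.kids == nil {
io.WriteString(w, t.lit)
continue
}
st = append(st, tok{lit: `)`})
for i := len(t.kids) - 1; i >= 0; i-- {
st = append(st, tok{lit: t.kids[i]})
if i > 0 {
st = append(st, tok{lit: ` AND `})
}
}
st = append(st, tok{lit: `(`})
}
// writes: ((("a") = (1)) AND (("b") = (2)))
}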
func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, sel *qcode.Select, ti *DBTableInfo) error {
func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, ti *DBTableInfo) error {
for i := 0; i < len(ex.NestedCols)-1; i++ {
cti, err := c.schema.GetTable(ex.NestedCols[i])
if err != nil {
@@ -841,6 +1029,14 @@ func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, sel *qcode.Select, ti
return err
}
io.WriteString(c.w, ` AND (`)
if err := c.renderExp(ex, cti, true); err != nil {
return err
}
io.WriteString(c.w, `)`)
}
for i := 0; i < len(ex.NestedCols)-1; i++ {
@@ -850,8 +1046,19 @@ func (c *compilerContext) renderNestedWhere(ex *qcode.Exp, sel *qcode.Select, ti
return nil
}
func (c *compilerContext) renderOp(ex *qcode.Exp, sel *qcode.Select, ti *DBTableInfo) error {
func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
var col *DBColumn
var ok bool
if ex.Op == qcode.OpNop {
return nil
}
if len(ex.Col) != 0 {
if col, ok = ti.ColMap[ex.Col]; !ok {
return fmt.Errorf("no column '%s' found ", ex.Col)
}
io.WriteString(c.w, `((`)
colWithTable(c.w, ti.Name, ex.Col)
io.WriteString(c.w, `) `)
@ -859,9 +1066,9 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, sel *qcode.Select, ti *DBTable
switch ex.Op {
case qcode.OpEquals:
io.WriteString(c.w, `=`)
io.WriteString(c.w, `IS NOT DISTINCT FROM`)
case qcode.OpNotEquals:
io.WriteString(c.w, `!=`)
io.WriteString(c.w, `IS DISTINCT FROM`)
case qcode.OpGreaterOrEquals:
io.WriteString(c.w, `>=`)
case qcode.OpLesserOrEquals:
@ -903,23 +1110,30 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, sel *qcode.Select, ti *DBTable
io.WriteString(c.w, `IS NOT NULL)`)
}
return nil
case qcode.OpEqID:
if len(ti.PrimaryCol) == 0 {
if ti.PrimaryCol == nil {
return fmt.Errorf("no primary key column defined for %s", ti.Name)
}
col = ti.PrimaryCol
//fmt.Fprintf(w, `(("%s") =`, c.ti.PrimaryCol)
io.WriteString(c.w, `((`)
colWithTable(c.w, ti.Name, ti.PrimaryCol)
colWithTable(c.w, ti.Name, ti.PrimaryCol.Name)
//io.WriteString(c.w, ti.PrimaryCol)
io.WriteString(c.w, `) =`)
case qcode.OpTsQuery:
if len(ti.TSVCol) == 0 {
if ti.TSVCol == nil {
return fmt.Errorf("no tsv column defined for %s", ti.Name)
}
//fmt.Fprintf(w, `(("%s") @@ to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
io.WriteString(c.w, `(("`)
io.WriteString(c.w, ti.TSVCol)
io.WriteString(c.w, `") @@ to_tsquery('`)
//fmt.Fprintf(w, `(("%s") @@ websearch_to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
io.WriteString(c.w, `((`)
colWithTable(c.w, ti.Name, ti.TSVCol.Name)
if c.schema.ver >= 110000 {
io.WriteString(c.w, `) @@ websearch_to_tsquery('`)
} else {
io.WriteString(c.w, `) @@ to_tsquery('`)
}
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `'))`)
return nil
@ -930,8 +1144,10 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, sel *qcode.Select, ti *DBTable
if ex.Type == qcode.ValList {
c.renderList(ex)
} else if col == nil {
return errors.New("no column found for expression value")
} else {
c.renderVal(ex, c.vars)
c.renderVal(ex, c.vars, col)
}
io.WriteString(c.w, `)`)
@ -948,28 +1164,28 @@ func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) erro
switch ob.Order {
case qcode.OrderAsc:
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC`)
case qcode.OrderDesc:
//fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC`)
case qcode.OrderAscNullsFirst:
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC NULLS FIRST`)
case qcode.OrderDescNullsFirst:
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS FIRST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC NULLS FIRST`)
case qcode.OrderAscNullsLast:
//fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` ASC NULLS LAST`)
case qcode.OrderDescNullsLast:
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Table, sel.ID, ob.Col)
tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Name, sel.ID, ob.Col)
tableIDColSuffix(c.w, sel.Name, sel.ID, ob.Col, "_ob")
io.WriteString(c.w, ` DESC NULLS LAST`)
default:
return fmt.Errorf("13: unexpected value %v", ob.Order)
@ -984,7 +1200,7 @@ func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
if i != 0 {
io.WriteString(c.w, `, `)
}
//fmt.Fprintf(w, `"%s_%d.ob.%s"`, c.sel.Table, c.sel.ID, c.sel.DistinctOn[i])
//fmt.Fprintf(w, `"%s_%d.ob.%s"`, c.sel.Name, c.sel.ID, c.sel.DistinctOn[i])
tableIDColSuffix(c.w, ti.Name, sel.ID, sel.DistinctOn[i], "_ob")
}
io.WriteString(c.w, `) `)
@ -1008,7 +1224,7 @@ func (c *compilerContext) renderList(ex *qcode.Exp) {
io.WriteString(c.w, `)`)
}
func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string) {
func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *DBColumn) {
io.WriteString(c.w, ` `)
switch ex.Type {
@ -1025,6 +1241,7 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string) {
io.WriteString(c.w, `'`)
case qcode.ValVar:
io.WriteString(c.w, `'`)
if val, ok := vars[ex.Val]; ok {
io.WriteString(c.w, val)
} else {
@ -1033,6 +1250,8 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string) {
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `}}`)
}
io.WriteString(c.w, `' :: `)
io.WriteString(c.w, col.Type)
}
//io.WriteString(c.w, `)`)
}
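A rough sketch of the variable rendering assumed above: a named variable stays as {{placeholder}} text, wrapped in quotes with a cast to the column's SQL type so Postgres coerces the substituted value. The variable name and column type here are illustrative.
package main
import "fmt"
// renderVarSketch mimics the quoted-placeholder-plus-cast output of
// renderVal for a ValVar expression; colType is hypothetical.
func renderVarSketch(name, colType string) string {
	return fmt.Sprintf(`'{{%s}}' :: %s`, name, colType)
}
func main() {
	fmt.Println(renderVarSketch("user_id", "bigint")) // '{{user_id}}' :: bigint
}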
@ -1093,22 +1312,6 @@ func aliasWithIDSuffix(w io.Writer, alias string, id int32, suffix string) {
io.WriteString(w, `"`)
}
func colWithAlias(w io.Writer, col, alias string) {
io.WriteString(w, `"`)
io.WriteString(w, col)
io.WriteString(w, `" AS "`)
io.WriteString(w, alias)
io.WriteString(w, `"`)
}
func tableWithAlias(w io.Writer, table, alias string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
io.WriteString(w, `" AS "`)
io.WriteString(w, alias)
io.WriteString(w, `"`)
}
func colWithTable(w io.Writer, table, col string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
@ -1120,8 +1323,10 @@ func colWithTable(w io.Writer, table, col string) {
func colWithTableID(w io.Writer, table string, id int32, col string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
io.WriteString(w, `_`)
int2string(w, id)
if id >= 0 {
io.WriteString(w, `_`)
int2string(w, id)
}
io.WriteString(w, `"."`)
io.WriteString(w, col)
io.WriteString(w, `"`)
@ -1139,20 +1344,6 @@ func colWithTableIDAlias(w io.Writer, table string, id int32, col, alias string)
io.WriteString(w, `"`)
}
func colWithTableIDSuffixAlias(w io.Writer, table string, id int32,
suffix, col, alias string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
io.WriteString(w, `_`)
int2string(w, id)
io.WriteString(w, suffix)
io.WriteString(w, `"."`)
io.WriteString(w, col)
io.WriteString(w, `" AS "`)
io.WriteString(w, alias)
io.WriteString(w, `"`)
}
func tableIDColSuffix(w io.Writer, table string, id int32, col, suffix string) {
io.WriteString(w, `"`)
io.WriteString(w, table)
@ -1177,7 +1368,7 @@ func int2string(w io.Writer, val int32) {
for val2 > 0 {
temp *= 10
temp += val2 % 10
val2 = int32(math.Floor(float64(val2 / 10)))
val2 = val2 / 10
}
val3 := temp
@ -1187,11 +1378,3 @@ func int2string(w io.Writer, val int32) {
w.Write([]byte{charset[d]})
}
}
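One caveat: as written, the digit-reversal in int2string drops trailing zeros (10 reverses to 1 and prints as "1"); if that matters for generated aliases, a buffer filled from the right sidesteps it. A minimal sketch under that assumption, for non-negative values:
package main
import (
	"io"
	"os"
)
// writeInt32 renders a non-negative int32 into w without strconv,
// filling a fixed buffer from the right so trailing zeros survive.
func writeInt32(w io.Writer, val int32) {
	var buf [11]byte
	i := len(buf)
	for {
		i--
		buf[i] = byte('0' + val%10)
		val /= 10
		if val == 0 {
			break
		}
	}
	w.Write(buf[i:])
}
func main() {
	writeInt32(os.Stdout, 10) // prints "10", not "1"
	os.Stdout.Write([]byte{'\n'})
}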
func relID(h *xxhash.Digest, child, parent string) uint64 {
h.WriteString(child)
h.WriteString(parent)
v := h.Sum64()
h.Reset()
return v
}


@ -28,7 +28,7 @@ func withComplexArgs(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0" ORDER BY "products_0_price_ob" DESC), '[]') AS "products" FROM (SELECT DISTINCT ON ("products_0_price_ob") row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0", "products_0"."price" AS "products_0_price_ob" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") < 28) AND (("products"."id") >= 20)) LIMIT ('30') :: integer) AS "products_0" ORDER BY "products_0_price_ob" DESC LIMIT ('30') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0" ORDER BY "products_0_price_ob" DESC), '[]') AS "json_0" FROM (SELECT DISTINCT ON ("products_0_price_ob") row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "json_row_0")) AS "json_0", "products_0"."price" AS "products_0_price_ob" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND ((("products"."id") < 28) AND (("products"."id") >= 20)))) LIMIT ('30') :: integer) AS "products_0" ORDER BY "products_0_price_ob" DESC LIMIT ('30') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -56,7 +56,7 @@ func withWhereMultiOr(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") < 20) OR (("products"."price") > 10) OR NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND ((("products"."price") < 20) OR (("products"."price") > 10) OR NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -82,7 +82,7 @@ func withWhereIsNull(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND ((("products"."price") > 10) AND NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -108,7 +108,7 @@ func withWhereAndList(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND ((("products"."price") > 10) AND NOT (("products"."id") IS NULL)))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -128,7 +128,7 @@ func fetchByID(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 15)) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337"`
sql := `SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND (("products"."id") = 15))) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -142,15 +142,17 @@ func fetchByID(t *testing.T) {
func searchQuery(t *testing.T) {
gql := `query {
products(search: "Imperial") {
products(search: "ale") {
id
name
search_rank
search_headline_description
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("tsv") @@ to_tsquery('Imperial'))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."search_rank" AS "search_rank", "products_0"."search_headline_description" AS "search_headline_description") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", ts_rank("products"."tsv", websearch_to_tsquery('ale')) AS "search_rank", ts_headline("products"."description", websearch_to_tsquery('ale')) AS "search_headline_description" FROM "products" WHERE ((("products"."tsv") @@ websearch_to_tsquery('ale'))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
resSQL, err := compileGQLToPSQL(gql, nil, "admin")
if err != nil {
t.Fatal(err)
}
@ -171,7 +173,7 @@ func oneToMany(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('users', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."email" AS "email", "products_1_join"."json_1" AS "products") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email", "users"."id" FROM "users" LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_1"), '[]') AS "json_1" FROM (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id")) AND ((("products"."price") > 0) AND (("products"."price") < 8))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -183,7 +185,7 @@ func oneToMany(t *testing.T) {
}
}
func belongsTo(t *testing.T) {
func oneToManyReverse(t *testing.T) {
gql := `query {
products {
name
@ -194,7 +196,7 @@ func belongsTo(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "users_1_join"."users" AS "users") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "users_1"."email" AS "email") AS "sel_1")) AS "sel_json_1" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "users_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "users_1_join"."json_1" AS "users") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_1"), '[]') AS "json_1" FROM (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1" LIMIT ('20') :: integer) AS "json_agg_1") AS "users_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -206,6 +208,37 @@ func belongsTo(t *testing.T) {
}
}
func oneToManyArray(t *testing.T) {
gql := `
query {
product {
name
price
tags {
id
name
}
}
tags {
name
product {
name
}
}
}`
sql := `SELECT row_to_json("json_root") FROM (SELECT "sel_0"."json_0" AS "tags", "sel_2"."json_2" AS "product" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "products_2"."name" AS "name", "products_2"."price" AS "price", "tags_3_join"."json_3" AS "tags") AS "json_row_2")) AS "json_2" FROM (SELECT "products"."name", "products"."price", "products"."tags" FROM "products" LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_3"), '[]') AS "json_3" FROM (SELECT row_to_json((SELECT "json_row_3" FROM (SELECT "tags_3"."id" AS "id", "tags_3"."name" AS "name") AS "json_row_3")) AS "json_3" FROM (SELECT "tags"."id", "tags"."name" FROM "tags" WHERE ((("tags"."slug") = any ("products_2"."tags"))) LIMIT ('20') :: integer) AS "tags_3" LIMIT ('20') :: integer) AS "json_agg_3") AS "tags_3_join" ON ('true') LIMIT ('1') :: integer) AS "sel_2", (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "tags_0"."name" AS "name", "product_1_join"."json_1" AS "product") AS "json_row_0")) AS "json_0" FROM (SELECT "tags"."name", "tags"."slug" FROM "tags" LIMIT ('20') :: integer) AS "tags_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."name" AS "name") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."name" FROM "products" WHERE ((("tags_0"."slug") = any ("products"."tags"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0") AS "json_root"`
resSQL, err := compileGQLToPSQL(gql, nil, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func manyToMany(t *testing.T) {
gql := `query {
products {
@ -217,7 +250,7 @@ func manyToMany(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "customers_1_join"."customers" AS "customers") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "customers_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."name" AS "name", "customers_1_join"."json_1" AS "customers") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_1"), '[]') AS "json_1" FROM (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name") AS "json_row_1")) AS "json_1" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1" LIMIT ('20') :: integer) AS "json_agg_1") AS "customers_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -240,7 +273,7 @@ func manyToManyReverse(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('customers', customers) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('customers', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "products_1_join"."json_1" AS "products") AS "json_row_0")) AS "json_0" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_1"), '[]') AS "json_1" FROM (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."name" AS "name") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id")) AND ((("products"."price") > 0) AND (("products"."price") < 8))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -260,7 +293,7 @@ func aggFunction(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -280,7 +313,7 @@ func aggFunctionBlockedByCol(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "anon")
if err != nil {
@ -300,7 +333,7 @@ func aggFunctionDisabled(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "anon1")
if err != nil {
@ -320,7 +353,7 @@ func aggFunctionWithFilter(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") > 10)) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND (("products"."id") > 10))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -339,7 +372,7 @@ func syntheticTables(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('me', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT ) AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = {{user_id}})) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "done_1337"`
sql := `SELECT json_object_agg('me', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT ) AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") IS NOT DISTINCT FROM '{{user_id}}' :: bigint)) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -359,7 +392,7 @@ func queryWithVariables(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") = {{product_price}}) AND (("products"."id") = {{product_id}})) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337"`
sql := `SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((((("products"."price") > 0) AND (("products"."price") < 8)) AND ((("products"."price") IS NOT DISTINCT FROM '{{product_price}}' :: numeric(7,2)) AND (("products"."id") = '{{product_id}}' :: bigint)))) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -376,7 +409,7 @@ func withWhereOnRelations(t *testing.T) {
users(where: {
not: {
products: {
price: { gt: 3 }
price: { gt: 3 }
}
}
}) {
@ -385,7 +418,7 @@ func withWhereOnRelations(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")))) LIMIT ('20') :: integer) AS "users_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('users', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."email" FROM "users" WHERE (NOT EXISTS (SELECT 1 FROM products WHERE (("products"."user_id") = ("users"."id")) AND ((("products"."price") > 3)))) LIMIT ('20') :: integer) AS "users_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
@ -397,6 +430,89 @@ func withWhereOnRelations(t *testing.T) {
}
}
func multiRoot(t *testing.T) {
gql := `query {
product {
id
name
customer {
email
}
customers {
email
}
}
user {
id
email
}
customer {
id
}
}`
sql := `SELECT row_to_json("json_root") FROM (SELECT "sel_0"."json_0" AS "customer", "sel_1"."json_1" AS "user", "sel_2"."json_2" AS "product" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "products_2"."id" AS "id", "products_2"."name" AS "name", "customers_3_join"."json_3" AS "customers", "customer_4_join"."json_4" AS "customer") AS "json_row_2")) AS "json_2" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE (((("products"."price") > 0) AND (("products"."price") < 8))) LIMIT ('1') :: integer) AS "products_2" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_4" FROM (SELECT "customers_4"."email" AS "email") AS "json_row_4")) AS "json_4" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('1') :: integer) AS "customers_4" LIMIT ('1') :: integer) AS "customer_4_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_3"), '[]') AS "json_3" FROM (SELECT row_to_json((SELECT "json_row_3" FROM (SELECT "customers_3"."email" AS "email") AS "json_row_3")) AS "json_3" FROM (SELECT "customers"."email" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_2"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_3" LIMIT ('20') :: integer) AS "json_agg_3") AS "customers_3_join" ON ('true') LIMIT ('1') :: integer) AS "sel_2", (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "sel_1", (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "customers_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "customers"."id" FROM "customers" LIMIT ('1') :: integer) AS "customers_0" LIMIT ('1') :: integer) AS "sel_0") AS "json_root"`
resSQL, err := compileGQLToPSQL(gql, nil, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func jsonColumnAsTable(t *testing.T) {
gql := `query {
products {
id
name
tag_count {
count
tags {
name
}
}
}
}`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "tag_count_1_join"."json_1" AS "tag_count") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "tag_count_1"."count" AS "count", "tags_2_join"."json_2" AS "tags") AS "json_row_1")) AS "json_1" FROM (SELECT "tag_count"."count", "tag_count"."tag_id" FROM "products", json_to_recordset("products"."tag_count") AS "tag_count"(tag_id bigint, count int) WHERE ((("products"."id") = ("products_0"."id"))) LIMIT ('1') :: integer) AS "tag_count_1" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("json_2"), '[]') AS "json_2" FROM (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "tags_2"."name" AS "name") AS "json_row_2")) AS "json_2" FROM (SELECT "tags"."name" FROM "tags" WHERE ((("tags"."id") = ("tag_count_1"."tag_id"))) LIMIT ('20') :: integer) AS "tags_2" LIMIT ('20') :: integer) AS "json_agg_2") AS "tags_2_join" ON ('true') LIMIT ('1') :: integer) AS "tag_count_1_join" ON ('true') LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func skipUserIDForAnonRole(t *testing.T) {
gql := `query {
products {
id
name
user(where: { id: { eq: $user_id } }) {
id
email
}
}
}`
sql := `SELECT json_object_agg('products', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func blockedQuery(t *testing.T) {
gql := `query {
user(id: 5, where: { id: { gt: 3 } }) {
@ -406,7 +522,7 @@ func blockedQuery(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "done_1337"`
sql := `SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE (false) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "bad_dude")
if err != nil {
@ -426,7 +542,7 @@ func blockedFunctions(t *testing.T) {
}
}`
sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email" FROM "users" WHERE (false) LIMIT ('20') :: integer) AS "users_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337"`
sql := `SELECT json_object_agg('users', json_0) FROM (SELECT coalesce(json_agg("json_0"), '[]') AS "json_0" FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."email" AS "email") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."email" FROM "users" WHERE (false) LIMIT ('20') :: integer) AS "users_0" LIMIT ('20') :: integer) AS "json_agg_0") AS "sel_0"`
resSQL, err := compileGQLToPSQL(gql, nil, "bad_dude")
if err != nil {
@ -445,8 +561,9 @@ func TestCompileQuery(t *testing.T) {
t.Run("withWhereMultiOr", withWhereMultiOr)
t.Run("fetchByID", fetchByID)
t.Run("searchQuery", searchQuery)
t.Run("belongsTo", belongsTo)
t.Run("oneToMany", oneToMany)
t.Run("oneToManyReverse", oneToManyReverse)
t.Run("oneToManyArray", oneToManyArray)
t.Run("manyToMany", manyToMany)
t.Run("manyToManyReverse", manyToManyReverse)
t.Run("aggFunction", aggFunction)
@ -456,6 +573,9 @@ func TestCompileQuery(t *testing.T) {
t.Run("syntheticTables", syntheticTables)
t.Run("queryWithVariables", queryWithVariables)
t.Run("withWhereOnRelations", withWhereOnRelations)
t.Run("multiRoot", multiRoot)
t.Run("jsonColumnAsTable", jsonColumnAsTable)
t.Run("skipUserIDForAnonRole", skipUserIDForAnonRole)
t.Run("blockedQuery", blockedQuery)
t.Run("blockedFunctions", blockedFunctions)
}
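These subtests can be run individually with the standard tooling, e.g. go test -run 'TestCompileQuery/searchQuery' ./psql (package path assumed).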

psql/schema.go Normal file

@ -0,0 +1,375 @@
package psql
import (
"fmt"
"strings"
"github.com/gobuffalo/flect"
)
type DBSchema struct {
ver int
t map[string]*DBTableInfo
rm map[string]map[string]*DBRel
}
type DBTableInfo struct {
Name string
Type string
Singular bool
Columns []DBColumn
PrimaryCol *DBColumn
TSVCol *DBColumn
ColMap map[string]*DBColumn
ColIDMap map[int16]*DBColumn
}
type RelType int
const (
RelOneToOne RelType = iota + 1
RelOneToMany
RelOneToManyThrough
RelEmbedded
RelRemote
)
type DBRel struct {
Type RelType
Through string
ColT string
Left struct {
col *DBColumn
Table string
Col string
Array bool
}
Right struct {
col *DBColumn
Table string
Col string
Array bool
}
}
func NewDBSchema(info *DBInfo, aliases map[string][]string) (*DBSchema, error) {
schema := &DBSchema{
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
}
for i, t := range info.Tables {
err := schema.addTable(t, info.Columns[i], aliases)
if err != nil {
return nil, err
}
}
for i, t := range info.Tables {
err := schema.updateRelationships(t, info.Columns[i])
if err != nil {
return nil, err
}
}
return schema, nil
}
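A hypothetical wiring of the two-step construction this enables, with GetDBInfo (from psql/tables.go below) feeding NewDBSchema; the import path, connection string, and alias map are assumptions:
package main
import (
	"context"
	"log"
	"github.com/dosco/super-graph/psql" // assumed import path
	"github.com/jackc/pgx/v4/pgxpool"
)
func main() {
	pool, err := pgxpool.Connect(context.Background(), "postgres://localhost:5432/app_dev")
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
	info, err := psql.GetDBInfo(pool) // introspects version, tables, columns
	if err != nil {
		log.Fatal(err)
	}
	schema, err := psql.NewDBSchema(info, map[string][]string{"users": {"me"}})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d tables, schema ready: %v", len(info.Tables), schema != nil)
}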
func (s *DBSchema) addTable(
t DBTable, cols []DBColumn, aliases map[string][]string) error {
colmap := make(map[string]*DBColumn, len(cols))
colidmap := make(map[int16]*DBColumn, len(cols))
singular := flect.Singularize(t.Key)
s.t[singular] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: true,
Columns: cols,
ColMap: colmap,
ColIDMap: colidmap,
}
plural := flect.Pluralize(t.Key)
s.t[plural] = &DBTableInfo{
Name: t.Name,
Type: t.Type,
Singular: false,
Columns: cols,
ColMap: colmap,
ColIDMap: colidmap,
}
if al, ok := aliases[t.Key]; ok {
for i := range al {
k1 := flect.Singularize(al[i])
s.t[k1] = s.t[singular]
k2 := flect.Pluralize(al[i])
s.t[k2] = s.t[plural]
}
}
for i := range cols {
c := &cols[i]
switch {
case c.Type == "tsvector":
s.t[singular].TSVCol = c
s.t[plural].TSVCol = c
case c.PrimaryKey:
s.t[singular].PrimaryCol = c
s.t[plural].PrimaryCol = c
}
colmap[c.Key] = c
colidmap[c.ID] = c
}
return nil
}
func (s *DBSchema) updateRelationships(t DBTable, cols []DBColumn) error {
jcols := make([]DBColumn, 0, len(cols))
ct := t.Key
cti, ok := s.t[ct]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ct)
}
for i := range cols {
c := cols[i]
if len(c.FKeyTable) == 0 {
continue
}
// Foreign key table name
ft := strings.ToLower(c.FKeyTable)
ti, ok := s.t[ft]
if !ok {
return fmt.Errorf("invalid foreign key table '%s'", ft)
}
// This is an embedded relationship, e.g. when a json/jsonb column
// is exposed as a table
if c.Name == c.FKeyTable && len(c.FKeyColID) == 0 {
rel := &DBRel{Type: RelEmbedded}
rel.Left.col = cti.PrimaryCol
rel.Left.Table = cti.Name
rel.Left.Col = cti.PrimaryCol.Name
rel.Right.col = &c
rel.Right.Table = ti.Name
rel.Right.Col = c.Name
if err := s.SetRel(ft, ct, rel); err != nil {
return err
}
continue
}
if len(c.FKeyColID) == 0 {
continue
}
// Foreign key column id
fcid := c.FKeyColID[0]
fc, ok := ti.ColIDMap[fcid]
if !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
fcid, ti.Name)
}
var rel1, rel2 *DBRel
// One-to-many relation between the current table and the
// table referenced by the foreign key
if fc.UniqueKey {
rel1 = &DBRel{Type: RelOneToOne}
} else {
rel1 = &DBRel{Type: RelOneToMany}
}
rel1.Left.col = &c
rel1.Left.Table = t.Name
rel1.Left.Col = c.Name
rel1.Left.Array = c.Array
rel1.Right.col = fc
rel1.Right.Table = c.FKeyTable
rel1.Right.Col = fc.Name
rel1.Right.Array = fc.Array
if err := s.SetRel(ct, ft, rel1); err != nil {
return err
}
// One-to-many reverse relation between the foreign key table
// and the current table
if c.UniqueKey {
rel2 = &DBRel{Type: RelOneToOne}
} else {
rel2 = &DBRel{Type: RelOneToMany}
}
rel2.Left.col = fc
rel2.Left.Table = c.FKeyTable
rel2.Left.Col = fc.Name
rel2.Left.Array = fc.Array
rel2.Right.col = &c
rel2.Right.Table = t.Name
rel2.Right.Col = c.Name
rel2.Right.Array = c.Array
if err := s.SetRel(ft, ct, rel2); err != nil {
return err
}
jcols = append(jcols, c)
}
// If a table contains multiple foreign key columns it's potentially
// a join table for many-to-many relationships or multiple one-to-many
// relations.
// The one-to-many-through relations below use the current table as the
// join table, aka the through table.
if len(jcols) > 1 {
for i := range jcols {
for n := range jcols {
if n == i {
continue
}
err := s.updateSchemaOTMT(cti, jcols[i], jcols[n])
if err != nil {
return err
}
}
}
}
return nil
}
func (s *DBSchema) updateSchemaOTMT(
ti *DBTableInfo, col1, col2 DBColumn) error {
t1 := strings.ToLower(col1.FKeyTable)
t2 := strings.ToLower(col2.FKeyTable)
fc1, ok := s.t[t1].ColIDMap[col1.FKeyColID[0]]
if !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
col1.FKeyColID[0], ti.Name)
}
fc2, ok := s.t[t2].ColIDMap[col2.FKeyColID[0]]
if !ok {
return fmt.Errorf("invalid foreign key column id '%d' for table '%s'",
col2.FKeyColID[0], ti.Name)
}
// One-to-many-through relation between the 1st foreign key table and
// the 2nd foreign key table
rel1 := &DBRel{Type: RelOneToManyThrough}
rel1.Through = ti.Name
rel1.ColT = col2.Name
rel1.Left.col = &col2
rel1.Left.Table = col2.FKeyTable
rel1.Left.Col = fc2.Name
rel1.Right.col = &col1
rel1.Right.Table = ti.Name
rel1.Right.Col = col1.Name
if err := s.SetRel(t1, t2, rel1); err != nil {
return err
}
// One-to-many-through relation between the 2nd foreign key table and
// the 1st foreign key table
rel2 := &DBRel{Type: RelOneToManyThrough}
rel2.Through = ti.Name
rel2.ColT = col1.Name
rel2.Left.col = fc1
rel2.Left.Table = col1.FKeyTable
rel2.Left.Col = fc1.Name
rel1.Right.col = &col2
rel2.Right.Table = ti.Name
rel2.Right.Col = col2.Name
if err := s.SetRel(t2, t1, rel2); err != nil {
return err
}
return nil
}
func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
t, ok := s.t[table]
if !ok {
return nil, fmt.Errorf("unknown table '%s'", table)
}
return t, nil
}
func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
sc := strings.ToLower(flect.Singularize(child))
pc := strings.ToLower(flect.Pluralize(child))
if _, ok := s.rm[sc]; !ok {
s.rm[sc] = make(map[string]*DBRel)
}
if _, ok := s.rm[pc]; !ok {
s.rm[pc] = make(map[string]*DBRel)
}
sp := strings.ToLower(flect.Singularize(parent))
pp := strings.ToLower(flect.Pluralize(parent))
if _, ok := s.rm[sc][sp]; !ok {
s.rm[sc][sp] = rel
}
if _, ok := s.rm[sc][pp]; !ok {
s.rm[sc][pp] = rel
}
if _, ok := s.rm[pc][sp]; !ok {
s.rm[pc][sp] = rel
}
if _, ok := s.rm[pc][pp]; !ok {
s.rm[pc][pp] = rel
}
return nil
}
func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
rel, ok := s.rm[child][parent]
if !ok {
// No relationship was found, so fetch the table info and try
// again in case child or parent is an alias
ct, err := s.GetTable(child)
if err != nil {
return nil, err
}
pt, err := s.GetTable(parent)
if err != nil {
return nil, err
}
rel, ok = s.rm[ct.Name][pt.Name]
if !ok {
return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
child, parent)
}
}
return rel, nil
}
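A brief usage sketch of the singular/plural registration above: one SetRel call makes the relationship reachable under all four name combinations, and GetRel falls back through GetTable for aliases. The table names and the helper itself are illustrative, not part of the change.
package psql
import "fmt"
func relLookupSketch(s *DBSchema) error {
	rel := &DBRel{Type: RelOneToMany}
	if err := s.SetRel("product", "user", rel); err != nil {
		return err
	}
	// All four singular/plural combinations now resolve to the same *DBRel.
	for _, c := range []string{"product", "products"} {
		for _, p := range []string{"user", "users"} {
			r, err := s.GetRel(c, p)
			if err != nil {
				return err
			}
			fmt.Println(c, "->", p, ":", r.Type)
		}
	}
	return nil
}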


@ -1,47 +0,0 @@
package psql
type Stack struct {
stA [20]int32
st []int32
top int
}
// Create a new Stack
func NewStack() *Stack {
s := &Stack{top: -1}
s.st = s.stA[:0]
return s
}
// Return the number of items in the Stack
func (s *Stack) Len() int {
return (s.top + 1)
}
// View the top item on the Stack
func (s *Stack) Peek() int32 {
if s.top == -1 {
return -1
}
return s.st[s.top]
}
// Pop the top item of the Stack and return it
func (s *Stack) Pop() int32 {
if s.top == -1 {
return -1
}
s.top--
return s.st[(s.top + 1)]
}
// Push a value onto the top of the Stack
func (s *Stack) Push(value int32) {
s.top++
if len(s.st) <= s.top {
s.st = append(s.st, value)
} else {
s.st[s.top] = value
}
}

psql/stack_int.go Normal file

@ -0,0 +1,47 @@
package psql
type IntStack struct {
stA [20]int32
st []int32
top int
}
// Create a new IntStack
func NewIntStack() *IntStack {
s := &IntStack{top: -1}
s.st = s.stA[:0]
return s
}
// Return the number of items in the IntStack
func (s *IntStack) Len() int {
return (s.top + 1)
}
// View the top item on the IntStack
func (s *IntStack) Peek() int32 {
if s.top == -1 {
return -1
}
return s.st[s.top]
}
// Pop the top item of the IntStack and return it
func (s *IntStack) Pop() int32 {
if s.top == -1 {
return -1
}
s.top--
return s.st[(s.top + 1)]
}
// Push a value onto the top of the IntStack
func (s *IntStack) Push(value int32) {
s.top++
if len(s.st) <= s.top {
s.st = append(s.st, value)
} else {
s.st[s.top] = value
}
}
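A short usage sketch, written as a test in the same package, of the behaviour the embedded backing array exists for (no heap allocation for shallow stacks); the test name is hypothetical:
package psql
import "testing"
func TestIntStackSketch(t *testing.T) {
	s := NewIntStack()
	s.Push('(')
	s.Push(')')
	if got := s.Pop(); got != ')' {
		t.Fatalf("expected ')', got %c", got)
	}
	if got := s.Peek(); got != '(' {
		t.Fatalf("expected '(', got %c", got)
	}
	if s.Len() != 1 {
		t.Fatalf("expected length 1, got %d", s.Len())
	}
}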

psql/strings.go Normal file

@ -0,0 +1,24 @@
package psql
import "fmt"
func (rt RelType) String() string {
switch rt {
case RelOneToOne:
return "one to one"
case RelOneToMany:
return "one to many"
case RelOneToManyThrough:
return "one to many through"
case RelRemote:
return "remote"
case RelEmbedded:
return "embedded"
}
return ""
}
func (re *DBRel) String() string {
return fmt.Sprintf("'%s.%s' --(%s)--> '%s.%s'",
re.Left.Table, re.Left.Col, re.Type, re.Right.Table, re.Right.Col)
}
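An illustrative Example test for the String methods above; the field values are made up:
package psql
import "fmt"
func ExampleDBRel_String() {
	rel := &DBRel{Type: RelOneToMany}
	rel.Left.Table, rel.Left.Col = "users", "id"
	rel.Right.Table, rel.Right.Col = "products", "user_id"
	fmt.Println(rel)
	// Output: 'users.id' --(one to many)--> 'products.user_id'
}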


@ -3,19 +3,92 @@ package psql
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/gobuffalo/flect"
"github.com/jackc/pgtype"
"github.com/jackc/pgx/v4/pgxpool"
)
type DBInfo struct {
Version int
Tables []DBTable
Columns [][]DBColumn
colmap map[string]map[string]*DBColumn
}
func GetDBInfo(db *pgxpool.Pool) (*DBInfo, error) {
di := &DBInfo{}
dbc, err := db.Acquire(context.Background())
if err != nil {
return nil, fmt.Errorf("error acquiring connection from pool: %w", err)
}
defer dbc.Release()
var version string
err = dbc.QueryRow(context.Background(), `SHOW server_version_num`).Scan(&version)
if err != nil {
return nil, fmt.Errorf("error fetching version: %w", err)
}
di.Version, err = strconv.Atoi(version)
if err != nil {
return nil, err
}
di.Tables, err = GetTables(dbc)
if err != nil {
return nil, err
}
di.colmap = make(map[string]map[string]*DBColumn, len(di.Tables))
for i, t := range di.Tables {
cols, err := GetColumns(dbc, "public", t.Name)
if err != nil {
return nil, err
}
di.Columns = append(di.Columns, cols)
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
for n, c := range di.Columns[i] {
di.colmap[t.Key][c.Key] = &di.Columns[i][n]
}
}
return di, nil
}
func (di *DBInfo) AddTable(t DBTable, cols []DBColumn) {
t.ID = di.Tables[len(di.Tables)-1].ID + 1
di.Tables = append(di.Tables, t)
di.colmap[t.Key] = make(map[string]*DBColumn, len(cols))
for i := range cols {
cols[i].ID = int16(i)
c := &cols[i]
di.colmap[t.Key][c.Key] = c
}
di.Columns = append(di.Columns, cols)
}
func (di *DBInfo) GetColumn(table, column string) (*DBColumn, bool) {
v, ok := di.colmap[strings.ToLower(table)][strings.ToLower(column)]
return v, ok
}
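A small sketch of the case-insensitive lookup GetColumn provides via the colmap built in GetDBInfo; the table and column names are hypothetical:
package psql
import "fmt"
func columnLookupSketch(di *DBInfo) {
	// Keys are lower-cased on both sides, so mixed-case input still matches.
	if col, ok := di.GetColumn("Products", "Price"); ok {
		fmt.Println(col.Name, col.Type, col.Array)
	}
}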
type DBTable struct {
ID int
Name string
Key string
Type string
}
func GetTables(dbc *pgxpool.Conn) ([]*DBTable, error) {
func GetTables(dbc *pgxpool.Conn) ([]DBTable, error) {
sqlStmt := `
SELECT
c.relname as "name",
@ -32,7 +105,7 @@ WHERE c.relkind IN ('r','v','m','f','')
AND n.nspname !~ ('^pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid);`
var tables []*DBTable
var tables []DBTable
rows, err := dbc.Query(context.Background(), sqlStmt)
if err != nil {
@ -40,13 +113,16 @@ AND pg_catalog.pg_table_is_visible(c.oid);`
}
defer rows.Close()
for rows.Next() {
t := DBTable{}
for i := 0; rows.Next(); i++ {
t := DBTable{ID: i}
err = rows.Scan(&t.Name, &t.Type)
if err != nil {
return nil, err
}
tables = append(tables, &t)
t.Key = strings.ToLower(t.Name)
if t.Key != "schema_migrations" && t.Key != "ar_internal_metadata" {
tables = append(tables, t)
}
}
return tables, nil
@ -55,7 +131,9 @@ AND pg_catalog.pg_table_is_visible(c.oid);`
type DBColumn struct {
ID int16
Name string
Key string
Type string
Array bool
NotNull bool
PrimaryKey bool
UniqueKey bool
@ -64,13 +142,18 @@ type DBColumn struct {
fKeyColID pgtype.Int2Array
}
func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]*DBColumn, error) {
func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]DBColumn, error) {
sqlStmt := `
SELECT
f.attnum AS id,
f.attname AS name,
f.attnotnull AS notnull,
pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
CASE
WHEN f.attndims != 0 THEN true
WHEN right(pg_catalog.format_type(f.atttypid,f.atttypmod), 2) = '[]' THEN true
ELSE false
END AS array,
CASE
WHEN p.contype = ('p'::char) THEN true
ELSE false
@ -93,7 +176,7 @@ FROM pg_attribute f
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind = ('r'::char)
WHERE c.relkind IN ('r', 'v', 'm', 'f')
AND n.nspname = $1 -- Replace with Schema name
AND c.relname = $2 -- Replace with table name
AND f.attnum > 0
@ -106,12 +189,12 @@ ORDER BY id;`
}
defer rows.Close()
cmap := make(map[int16]*DBColumn)
cmap := make(map[int16]DBColumn)
for rows.Next() {
c := DBColumn{}
err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.PrimaryKey, &c.UniqueKey,
&c.FKeyTable, &c.fKeyColID)
err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.Array, &c.PrimaryKey, &c.UniqueKey, &c.FKeyTable, &c.fKeyColID)
if err != nil {
return nil, err
}
@ -119,6 +202,7 @@ ORDER BY id;`
if v, ok := cmap[c.ID]; ok {
if c.PrimaryKey {
v.PrimaryKey = true
v.UniqueKey = true
}
if c.NotNull {
v.NotNull = true
@ -126,260 +210,37 @@ ORDER BY id;`
if c.UniqueKey {
v.UniqueKey = true
}
if c.Array {
v.Array = true
}
if len(c.FKeyTable) != 0 {
v.FKeyTable = c.FKeyTable
}
if c.fKeyColID.Elements != nil {
v.fKeyColID = c.fKeyColID
err := v.fKeyColID.AssignTo(&v.FKeyColID)
if err != nil {
return nil, err
}
}
cmap[c.ID] = v
} else {
err := c.fKeyColID.AssignTo(&c.FKeyColID)
if err != nil {
return nil, err
}
cmap[c.ID] = &c
c.Key = strings.ToLower(c.Name)
if c.PrimaryKey {
c.UniqueKey = true
}
cmap[c.ID] = c
}
}
cols := make([]*DBColumn, 0, len(cmap))
for _, v := range cmap {
cols = append(cols, v)
cols := make([]DBColumn, 0, len(cmap))
for i := range cmap {
cols = append(cols, cmap[i])
}
return cols, nil
}
type DBSchema struct {
t map[string]*DBTableInfo
rm map[string]map[string]*DBRel
al map[string]struct{}
}
type DBTableInfo struct {
Name string
Singular bool
PrimaryCol string
TSVCol string
Columns map[string]*DBColumn
ColumnNames []string
}
type RelType int
const (
RelBelongTo RelType = iota + 1
RelOneToMany
RelOneToManyThrough
RelRemote
)
type DBRel struct {
Type RelType
Through string
ColT string
Col1 string
Col2 string
}
func NewDBSchema(db *pgxpool.Pool, aliases map[string][]string) (*DBSchema, error) {
schema := &DBSchema{
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
al: make(map[string]struct{}),
}
dbc, err := db.Acquire(context.Background())
if err != nil {
return nil, fmt.Errorf("error acquiring connection from pool")
}
defer dbc.Release()
tables, err := GetTables(dbc)
if err != nil {
return nil, err
}
for _, t := range tables {
cols, err := GetColumns(dbc, "public", t.Name)
if err != nil {
return nil, err
}
schema.updateSchema(t, cols, aliases)
}
return schema, nil
}
func (s *DBSchema) updateSchema(
t *DBTable,
cols []*DBColumn,
aliases map[string][]string) {
// Foreign key columns in current table
colByID := make(map[int16]*DBColumn)
columns := make(map[string]*DBColumn, len(cols))
colNames := make([]string, 0, len(cols))
for i := range cols {
c := cols[i]
name := strings.ToLower(c.Name)
columns[name] = c
colNames = append(colNames, name)
colByID[c.ID] = c
}
singular := strings.ToLower(flect.Singularize(t.Name))
s.t[singular] = &DBTableInfo{
Name: t.Name,
Singular: true,
Columns: columns,
ColumnNames: colNames,
}
plural := strings.ToLower(flect.Pluralize(t.Name))
s.t[plural] = &DBTableInfo{
Name: t.Name,
Singular: false,
Columns: columns,
ColumnNames: colNames,
}
ct := strings.ToLower(t.Name)
if al, ok := aliases[ct]; ok {
for i := range al {
k1 := flect.Singularize(al[i])
s.t[k1] = s.t[singular]
k2 := flect.Pluralize(al[i])
s.t[k2] = s.t[plural]
s.al[k1] = struct{}{}
s.al[k2] = struct{}{}
}
}
jcols := make([]*DBColumn, 0, len(cols))
for _, c := range cols {
switch {
case c.Type == "tsvector":
s.t[singular].TSVCol = c.Name
s.t[plural].TSVCol = c.Name
case c.PrimaryKey:
s.t[singular].PrimaryCol = c.Name
s.t[plural].PrimaryCol = c.Name
case len(c.FKeyTable) != 0:
if len(c.FKeyColID) == 0 {
continue
}
// Foreign key table name
ft := strings.ToLower(c.FKeyTable)
fc, ok := colByID[c.FKeyColID[0]]
if !ok {
continue
}
// Belongs-to relation between current table and the
// table in the foreign key
rel1 := &DBRel{RelBelongTo, "", "", c.Name, fc.Name}
s.SetRel(ct, ft, rel1)
// One-to-many relation between the foreign key table and
// the current table
rel2 := &DBRel{RelOneToMany, "", "", fc.Name, c.Name}
s.SetRel(ft, ct, rel2)
jcols = append(jcols, c)
}
}
// If the table contains multiple foreign key columns it's possibly
// a join table for many-to-many relationships, or for multiple
// one-to-many relations.
// The one-to-many-through relations below use the current table
// as the join table, aka the through table.
if len(jcols) > 1 {
for i := range jcols {
for n := range jcols {
if n != i {
s.updateSchemaOTMT(ct, jcols[i], jcols[n], colByID)
}
}
}
}
}
func (s *DBSchema) updateSchemaOTMT(
ct string,
col1, col2 *DBColumn,
colByID map[int16]*DBColumn) {
t1 := strings.ToLower(col1.FKeyTable)
t2 := strings.ToLower(col2.FKeyTable)
fc1, ok := colByID[col1.FKeyColID[0]]
if !ok {
return
}
fc2, ok := colByID[col2.FKeyColID[0]]
if !ok {
return
}
// One-to-many-through relation between the 1st foreign key table
// and the 2nd foreign key table
//rel1 := &DBRel{RelOneToManyThrough, ct, fc1.Name, col1.Name}
rel1 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name, col1.Name}
s.SetRel(t1, t2, rel1)
// One-to-many-through relation between the 2nd foreign key table
// and the 1st foreign key table
//rel2 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name}
rel2 := &DBRel{RelOneToManyThrough, ct, col1.Name, fc1.Name, col2.Name}
s.SetRel(t2, t1, rel2)
}
func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
t, ok := s.t[table]
if !ok {
return nil, fmt.Errorf("unknown table '%s'", table)
}
return t, nil
}
func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
sc := strings.ToLower(flect.Singularize(child))
pc := strings.ToLower(flect.Pluralize(child))
if _, ok := s.rm[sc]; !ok {
s.rm[sc] = make(map[string]*DBRel)
}
if _, ok := s.rm[pc]; !ok {
s.rm[pc] = make(map[string]*DBRel)
}
sp := strings.ToLower(flect.Singularize(parent))
pp := strings.ToLower(flect.Pluralize(parent))
s.rm[sc][sp] = rel
s.rm[sc][pp] = rel
s.rm[pc][sp] = rel
s.rm[pc][pp] = rel
return nil
}
func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
rel, ok := s.rm[child][parent]
if !ok {
return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
child, parent)
}
return rel, nil
}
func (s *DBSchema) IsAlias(name string) bool {
_, ok := s.al[name]
return ok
}
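
Note how SetRel above fans a single relationship out to every singular/plural spelling of both tables, which is what makes GetRel lookups pluralization-agnostic. A rough sketch against the pre-refactor API shown here (table and column names illustrative; s is a *DBSchema built via NewDBSchema):

// products.user_id -> users.id registers four lookup paths.
rel := &DBRel{Type: RelBelongTo, Col1: "user_id", Col2: "id"}
s.SetRel("products", "users", rel)

for _, p := range [][2]string{
	{"product", "user"},
	{"product", "users"},
	{"products", "user"},
	{"products", "users"},
} {
	// Every spelling resolves to the same *DBRel.
	if r, err := s.GetRel(p[0], p[1]); err == nil && r == rel {
		fmt.Println(p[0], "->", p[1], "ok")
	}
}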

psql/test_schema.go Normal file

@ -0,0 +1,102 @@
package psql
import (
"log"
"strings"
)
func getTestSchema() *DBSchema {
tables := []DBTable{
DBTable{Name: "customers", Type: "table"},
DBTable{Name: "users", Type: "table"},
DBTable{Name: "products", Type: "table"},
DBTable{Name: "purchases", Type: "table"},
DBTable{Name: "tags", Type: "table"},
DBTable{Name: "tag_count", Type: "json"},
}
columns := [][]DBColumn{
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 9, Name: "tags", Type: "text[]", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{3}, Array: true},
DBColumn{ID: 9, Name: "tag_count", Type: "json", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tag_count", FKeyColID: []int16{}}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: true},
DBColumn{ID: 2, Name: "name", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false},
DBColumn{ID: 3, Name: "slug", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false}},
[]DBColumn{
DBColumn{ID: 1, Name: "tag_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "tags", FKeyColID: []int16{1}},
DBColumn{ID: 2, Name: "count", Type: "int", NotNull: false, PrimaryKey: false, UniqueKey: false}},
}
for i := range tables {
tables[i].Key = strings.ToLower(tables[i].Name)
for n := range columns[i] {
columns[i][n].Key = strings.ToLower(columns[i][n].Name)
}
}
schema := &DBSchema{
ver: 110000,
t: make(map[string]*DBTableInfo),
rm: make(map[string]map[string]*DBRel),
}
aliases := map[string][]string{
"users": []string{"mes"},
}
for i, t := range tables {
err := schema.addTable(t, columns[i], aliases)
if err != nil {
log.Fatal(err)
}
}
for i, t := range tables {
err := schema.updateRelationships(t, columns[i])
if err != nil {
log.Fatal(err)
}
}
return schema
}
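
The aliases map registers "mes" as an alias of the users table, so queries can address it through the alias (and its derived singular). A hypothetical usage, assuming the alias resolves the way the pre-refactor code above did:

gql := `query {
	me {
		id
		email
	}
}`
// "me" (singular of the alias "mes") is assumed to resolve to "users",
// typically combined with a user_id filter on the role.
resSQL, err := compileGQLToPSQL(gql, nil, "user")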

psql/update.go Normal file

@ -0,0 +1,233 @@
//nolint:errcheck
package psql
import (
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/qcode"
"github.com/dosco/super-graph/util"
)
func (c *compilerContext) renderUpdate(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
update, ok := vars[qc.ActionVar]
if !ok {
return 0, fmt.Errorf("variable '%s' not !defined", qc.ActionVar)
}
if len(update) == 0 {
return 0, fmt.Errorf("variable '%s' is empty", qc.ActionVar)
}
io.WriteString(c.w, `WITH "_sg_input" AS (SELECT '{{`)
io.WriteString(c.w, qc.ActionVar)
io.WriteString(c.w, `}}' :: json AS j)`)
st := util.NewStack()
st.Push(kvitem{_type: itemUpdate, key: ti.Name, val: update, ti: ti})
for {
if st.Len() == 0 {
break
}
if update[0] == '[' && st.Len() > 1 {
return 0, errors.New("Nested bulk update not supported")
}
intf := st.Pop()
switch item := intf.(type) {
case kvitem:
if err := c.handleKVItem(st, item); err != nil {
return 0, err
}
case renitem:
var err error
// if w := qc.Selects[0].Where; w != nil && w.Op == qcode.OpFalse {
// io.WriteString(c.w, ` WHERE false`)
// }
switch item._type {
case itemUpdate:
err = c.renderUpdateStmt(w, qc, item)
case itemConnect:
err = c.renderConnectStmt(qc, w, item)
case itemDisconnect:
err = c.renderDisconnectStmt(qc, w, item)
case itemUnion:
err = c.renderUnionStmt(w, item)
}
if err != nil {
return 0, err
}
}
}
io.WriteString(c.w, ` `)
return 0, nil
}
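
renderUpdate drives everything off an explicit stack of kvitems, one per object in the update JSON; each pops back out as a renitem that renders its own CTE, so a nested payload fans out into a chain of UPDATE CTEs. A simplified sketch of the output shape (not verbatim; compare the tests later in this diff):

// $data = {"name": "Apple", "user": {"email": "one@test.com"}}
//
// WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j),
// "products" AS (UPDATE "products" SET (...) = (SELECT ... FROM "_sg_input" i,
//     json_populate_record(NULL::products, i.j) t) WHERE ... RETURNING "products".*),
// "users" AS (UPDATE "users" SET (...) = (SELECT ... FROM "_sg_input" i,
//     json_populate_record(NULL::users, i.j->'user') t)
//     FROM "products" WHERE ... RETURNING "users".*)
// SELECT json_object_agg(...) ...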
func (c *compilerContext) renderUpdateStmt(w io.Writer, qc *qcode.QCode, item renitem) error {
ti := item.ti
jt := item.data
sk := nestedUpdateRelColumnsMap(item.kvitem)
io.WriteString(c.w, `, `)
renderCteName(c.w, item.kvitem)
io.WriteString(c.w, ` AS (`)
io.WriteString(w, `UPDATE `)
quoted(w, ti.Name)
io.WriteString(w, ` SET (`)
renderInsertUpdateColumns(w, qc, jt, ti, sk, false)
renderNestedUpdateRelColumns(w, item.kvitem, false)
io.WriteString(w, `) = (SELECT `)
renderInsertUpdateColumns(w, qc, jt, ti, sk, true)
renderNestedUpdateRelColumns(w, item.kvitem, true)
io.WriteString(w, ` FROM "_sg_input" i, `)
renderNestedUpdateRelTables(w, item.kvitem)
if item.array {
io.WriteString(w, `json_populate_recordset`)
} else {
io.WriteString(w, `json_populate_record`)
}
io.WriteString(w, `(NULL::`)
io.WriteString(w, ti.Name)
if len(item.path) == 0 {
io.WriteString(w, `, i.j) t)`)
} else {
io.WriteString(w, `, i.j->`)
joinPath(w, item.path)
io.WriteString(w, `) t) `)
}
if item.id != 0 {
// Render SQL to set id values if the child-to-parent
// relationship is one-to-one
rel := item.relCP
io.WriteString(w, `FROM `)
quoted(w, rel.Right.Table)
io.WriteString(w, ` WHERE ((`)
colWithTable(w, rel.Left.Table, rel.Left.Col)
io.WriteString(w, `) = (`)
colWithTable(w, rel.Right.Table, rel.Right.Col)
io.WriteString(w, `)`)
if item.relPC.Type == RelOneToMany {
if conn, ok := item.data["where"]; ok {
io.WriteString(w, ` AND `)
renderWhereFromJSON(w, item.kvitem, "where", conn)
} else if conn, ok := item.data["_where"]; ok {
io.WriteString(w, ` AND `)
renderWhereFromJSON(w, item.kvitem, "_where", conn)
}
}
io.WriteString(w, `)`)
} else {
io.WriteString(w, ` WHERE `)
if err := c.renderWhere(&qc.Selects[0], ti); err != nil {
return err
}
}
io.WriteString(w, ` RETURNING `)
quoted(w, ti.Name)
io.WriteString(w, `.*)`)
return nil
}
func nestedUpdateRelColumnsMap(item kvitem) map[string]struct{} {
sk := make(map[string]struct{}, len(item.items))
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
sk[v.relCP.Right.Col] = struct{}{}
}
}
return sk
}
func renderNestedUpdateRelColumns(w io.Writer, item kvitem, values bool) error {
// Render child foreign key columns if the child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
if values {
// if v.relCP.Right.Array {
// io.WriteString(w, `array_diff(`)
// colWithTable(w, v.relCP.Right.Table, v.relCP.Right.Col)
// io.WriteString(w, `, `)
// }
if v._ctype > 0 {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `".`)
quoted(w, v.relCP.Left.Col)
} else {
colWithTable(w, v.relCP.Left.Table, v.relCP.Left.Col)
}
// if v.relCP.Right.Array {
// io.WriteString(w, `)`)
// }
} else {
quoted(w, v.relCP.Right.Col)
}
}
}
return nil
}
func renderNestedUpdateRelTables(w io.Writer, item kvitem) error {
// Render the tables needed to set values if the child-to-parent
// relationship is one-to-many
for _, v := range item.items {
if v._ctype > 0 && v.relCP.Type == RelOneToMany {
io.WriteString(w, `"_x_`)
io.WriteString(w, v.relCP.Left.Table)
io.WriteString(w, `", `)
}
}
return nil
}
func (c *compilerContext) renderDelete(qc *qcode.QCode, w io.Writer,
vars Variables, ti *DBTableInfo) (uint32, error) {
root := &qc.Selects[0]
io.WriteString(c.w, `WITH `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` AS (DELETE FROM `)
quoted(c.w, ti.Name)
io.WriteString(c.w, ` WHERE `)
if err := c.renderWhere(root, ti); err != nil {
return 0, err
}
io.WriteString(w, ` RETURNING `)
quoted(w, ti.Name)
io.WriteString(w, `.*)`)
return 0, nil
}
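
renderDelete reuses the same shape: the DELETE is wrapped in a CTE named after the table so the outer selection logic can read the RETURNING rows as if they were a normal query. Roughly:

// mutation { product(delete: true, id: 1) { id } }
//
// WITH "products" AS (DELETE FROM "products"
//     WHERE (("products"."id") = 1) RETURNING "products".*)
// SELECT json_object_agg('product', json_0) FROM (...) AS "sel_0"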

psql/update_test.go Normal file

@ -0,0 +1,339 @@
package psql
import (
"encoding/json"
"testing"
)
func singleUpdate(t *testing.T) {
gql := `mutation {
product(id: 15, update: $update, where: { id: { eq: 1 } }) {
id
name
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{update}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "description") = (SELECT "t"."name", "t"."description" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE ((("products"."id") IS NOT DISTINCT FROM 1) AND (("products"."id") = 15)) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"update": json.RawMessage(` { "name": "my_name", "description": "my_desc" }`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "anon")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func simpleUpdateWithPresets(t *testing.T) {
gql := `mutation {
product(update: $data) {
id
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "updated_at") = (SELECT "t"."name", "t"."price", 'now' :: timestamp without time zone FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."user_id") IS NOT DISTINCT FROM '{{user_id}}' :: bigint) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{"name": "Apple", "price": 1.25}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "user")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedUpdateManyToMany(t *testing.T) {
gql := `mutation {
purchase(update: $data, id: 5) {
sale_type
quantity
due_date
customer {
id
full_name
email
}
product {
id
name
price
}
}
}`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = 5) RETURNING "purchases".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*) SELECT json_object_agg('purchase', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "product_1_join"."json_1" AS "product", "customer_2_join"."json_2" AS "customer") AS "json_row_0")) AS "json_0" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email") AS "json_row_2")) AS "json_2" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2" LIMIT ('1') :: integer) AS "customer_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "purchases" AS (UPDATE "purchases" SET ("sale_type", "quantity", "due_date") = (SELECT "t"."sale_type", "t"."quantity", "t"."due_date" FROM "_sg_input" i, json_populate_record(NULL::purchases, i.j) t) WHERE (("purchases"."id") = 5) RETURNING "purchases".*), "customers" AS (UPDATE "customers" SET ("full_name", "email") = (SELECT "t"."full_name", "t"."email" FROM "_sg_input" i, json_populate_record(NULL::customers, i.j->'customer') t) FROM "purchases" WHERE (("customers"."id") = ("purchases"."customer_id")) RETURNING "customers".*), "products" AS (UPDATE "products" SET ("name", "price") = (SELECT "t"."name", "t"."price" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "purchases" WHERE (("products"."id") = ("purchases"."product_id")) RETURNING "products".*) SELECT json_object_agg('purchase', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "purchases_0"."sale_type" AS "sale_type", "purchases_0"."quantity" AS "quantity", "purchases_0"."due_date" AS "due_date", "product_1_join"."json_1" AS "product", "customer_2_join"."json_2" AS "customer") AS "json_row_0")) AS "json_0" FROM (SELECT "purchases"."sale_type", "purchases"."quantity", "purchases"."due_date", "purchases"."product_id", "purchases"."customer_id" FROM "purchases" LIMIT ('1') :: integer) AS "purchases_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_2" FROM (SELECT "customers_2"."id" AS "id", "customers_2"."full_name" AS "full_name", "customers_2"."email" AS "email") AS "json_row_2")) AS "json_2" FROM (SELECT "customers"."id", "customers"."full_name", "customers"."email" FROM "customers" WHERE ((("customers"."id") = ("purchases_0"."customer_id"))) LIMIT ('1') :: integer) AS "customers_2" LIMIT ('1') :: integer) AS "customer_2_join" ON ('true') LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."id") = ("purchases_0"."product_id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(` {
"sale_type": "bought",
"quantity": 5,
"due_date": "now",
"customer": {
"email": "thedude@rug.com",
"full_name": "The Dude"
},
"product": {
"name": "Apple",
"price": 1.25
}
}
`),
}
for i := 0; i < 1000; i++ {
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql1 && string(resSQL) != sql2 {
t.Fatal(errNotExpected)
}
}
}
func nestedUpdateOneToMany(t *testing.T) {
gql := `mutation {
user(update: $data, where: { id: { eq: 8 } }) {
id
full_name
email
product {
id
name
price
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") IS NOT DISTINCT FROM 8) RETURNING "users".*), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j->'product') t) FROM "users" WHERE (("products"."user_id") = ("users"."id") AND "products"."id"= ((i.j->'product'->'where'->>'id'))::bigint) RETURNING "products".*) SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "product_1_join"."json_1" AS "product") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"where": {
"id": 2
},
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now"
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedUpdateOneToOne(t *testing.T) {
gql := `mutation {
product(update: $data, id: 6) {
id
name
user {
id
full_name
email
}
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "products" AS (UPDATE "products" SET ("name", "price", "created_at", "updated_at") = (SELECT "t"."name", "t"."price", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 6) RETURNING "products".*), "users" AS (UPDATE "users" SET ("email") = (SELECT "t"."email" FROM "_sg_input" i, json_populate_record(NULL::users, i.j->'user') t) FROM "products" WHERE (("users"."id") = ("products"."user_id")) RETURNING "users".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"created_at": "now",
"updated_at": "now",
"user": {
"email": "thedude@rug.com"
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
func nestedUpdateOneToManyWithConnect(t *testing.T) {
gql := `mutation {
user(update: $data, id: 6) {
id
full_name
email
product {
id
name
price
}
}
}`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (UPDATE "users" SET ("full_name", "email", "created_at", "updated_at") = (SELECT "t"."full_name", "t"."email", "t"."created_at", "t"."updated_at" FROM "_sg_input" i, json_populate_record(NULL::users, i.j) t) WHERE (("users"."id") = 6) RETURNING "users".*), "products_c" AS ( UPDATE "products" SET "user_id" = "users"."id" FROM "users" WHERE ("products"."id"= ((i.j->'product'->'connect'->>'id'))::bigint) RETURNING "products".*), "products_d" AS ( UPDATE "products" SET "user_id" = NULL FROM "users" WHERE ("products"."id"= ((i.j->'product'->'disconnect'->>'id'))::bigint) RETURNING "products".*), "products" AS (SELECT * FROM "products_c" UNION ALL SELECT * FROM "products_d") SELECT json_object_agg('user', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "users_0"."id" AS "id", "users_0"."full_name" AS "full_name", "users_0"."email" AS "email", "product_1_join"."json_1" AS "product") AS "json_row_0")) AS "json_0" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" LIMIT ('1') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "products_1"."id" AS "id", "products_1"."name" AS "name", "products_1"."price" AS "price") AS "json_row_1")) AS "json_1" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('1') :: integer) AS "products_1" LIMIT ('1') :: integer) AS "product_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"email": "thedude@rug.com",
"full_name": "The Dude",
"created_at": "now",
"updated_at": "now",
"product": {
"connect": { "id": 7 },
"disconnect": { "id": 8 }
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql1 {
t.Fatal(errNotExpected)
}
}
func nestedUpdateOneToOneWithConnect(t *testing.T) {
gql := `mutation {
product(update: $data, id: 9) {
id
name
user {
id
full_name
email
}
}
}`
sql1 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint AND "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
sql2 := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT "id" FROM "_sg_input" i,"users" WHERE "users"."email"= ((i.j->'user'->'connect'->>'email'))::character varying AND "users"."id"= ((i.j->'user'->'connect'->>'id'))::bigint LIMIT 1), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 9) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "user_1_join"."json_1" AS "user") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT row_to_json((SELECT "json_row_1" FROM (SELECT "users_1"."id" AS "id", "users_1"."full_name" AS "full_name", "users_1"."email" AS "email") AS "json_row_1")) AS "json_1" FROM (SELECT "users"."id", "users"."full_name", "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('1') :: integer) AS "users_1" LIMIT ('1') :: integer) AS "user_1_join" ON ('true') LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"user": {
"connect": { "id": 5, "email": "test@test.com" }
}
}`),
}
for i := 0; i < 1000; i++ {
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql1 && string(resSQL) != sql2 {
t.Fatal(errNotExpected)
}
}
}
func nestedUpdateOneToOneWithDisconnect(t *testing.T) {
gql := `mutation {
product(update: $data, id: 2) {
id
name
user_id
}
}`
sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "_x_users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "_x_users"."id" FROM "_sg_input" i, "_x_users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
vars := map[string]json.RawMessage{
"data": json.RawMessage(`{
"name": "Apple",
"price": 1.25,
"user": {
"disconnect": { "id": 5 }
}
}`),
}
resSQL, err := compileGQLToPSQL(gql, vars, "admin")
if err != nil {
t.Fatal(err)
}
if string(resSQL) != sql {
t.Fatal(errNotExpected)
}
}
// func nestedUpdateOneToOneWithDisconnectArray(t *testing.T) {
// gql := `mutation {
// product(update: $data, id: 2) {
// id
// name
// user_id
// }
// }`
// sql := `WITH "_sg_input" AS (SELECT '{{data}}' :: json AS j), "users" AS (SELECT * FROM (VALUES(NULL::bigint)) AS LOOKUP("id")), "products" AS (UPDATE "products" SET ("name", "price", "user_id") = (SELECT "t"."name", "t"."price", "users"."id" FROM "_sg_input" i, "users", json_populate_record(NULL::products, i.j) t) WHERE (("products"."id") = 2) RETURNING "products".*) SELECT json_object_agg('product', json_0) FROM (SELECT row_to_json((SELECT "json_row_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."user_id" AS "user_id") AS "json_row_0")) AS "json_0" FROM (SELECT "products"."id", "products"."name", "products"."user_id" FROM "products" LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "sel_0"`
// vars := map[string]json.RawMessage{
// "data": json.RawMessage(`{
// "name": "Apple",
// "price": 1.25,
// "user": {
// "disconnect": { "id": 5 }
// }
// }`),
// }
// resSQL, err := compileGQLToPSQL(gql, vars, "admin")
// if err != nil {
// t.Fatal(err)
// }
// if string(resSQL) != sql {
// t.Fatal(errNotExpected)
// }
// }
func TestCompileUpdate(t *testing.T) {
t.Run("singleUpdate", singleUpdate)
t.Run("simpleUpdateWithPresets", simpleUpdateWithPresets)
t.Run("nestedUpdateManyToMany", nestedUpdateManyToMany)
t.Run("nestedUpdateOneToMany", nestedUpdateOneToMany)
t.Run("nestedUpdateOneToOne", nestedUpdateOneToOne)
t.Run("nestedUpdateOneToManyWithConnect", nestedUpdateOneToManyWithConnect)
t.Run("nestedUpdateOneToOneWithConnect", nestedUpdateOneToOneWithConnect)
t.Run("nestedUpdateOneToOneWithDisconnect", nestedUpdateOneToOneWithDisconnect)
//t.Run("nestedUpdateOneToOneWithDisconnectArray", nestedUpdateOneToOneWithDisconnectArray)
}


@ -1,13 +1,13 @@
package qcode
import (
"regexp"
"sort"
"strings"
)
type Config struct {
Blocklist []string
KeepArgs bool
}
type QueryConfig struct {
@ -45,6 +45,7 @@ type trval struct {
query struct {
limit string
fil *Exp
filNU bool
cols map[string]struct{}
disable struct {
funcs bool
@ -53,6 +54,7 @@ type trval struct {
insert struct {
fil *Exp
filNU bool
cols map[string]struct{}
psmap map[string]string
pslist []string
@ -60,14 +62,16 @@ type trval struct {
update struct {
fil *Exp
filNU bool
cols map[string]struct{}
psmap map[string]string
pslist []string
}
delete struct {
fil *Exp
cols map[string]struct{}
fil *Exp
filNU bool
cols map[string]struct{}
}
}
@ -80,7 +84,7 @@ func (trv *trval) allowedColumns(qt QType) map[string]struct{} {
case QTUpdate:
return trv.update.cols
case QTDelete:
return trv.insert.cols
return trv.delete.cols
case QTUpsert:
return trv.insert.cols
}
@ -88,21 +92,21 @@ func (trv *trval) allowedColumns(qt QType) map[string]struct{} {
return nil
}
func (trv *trval) filter(qt QType) *Exp {
func (trv *trval) filter(qt QType) (*Exp, bool) {
switch qt {
case QTQuery:
return trv.query.fil
return trv.query.fil, trv.query.filNU
case QTInsert:
return trv.insert.fil
return trv.insert.fil, trv.insert.filNU
case QTUpdate:
return trv.update.fil
return trv.update.fil, trv.update.filNU
case QTDelete:
return trv.delete.fil
return trv.delete.fil, trv.delete.filNU
case QTUpsert:
return trv.insert.fil
return trv.insert.fil, trv.insert.filNU
}
return nil
return nil, false
}
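
The second return value (filNU) appears to flag filters that reference $user_id; addFilters later in this diff uses it to skip rendering for the anon role rather than emitting a query that can never match. The call-site pattern looks like:

fil, needsUser := trv.filter(QTQuery)
if needsUser && role == "anon" {
	sel.SkipRender = true
}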
func listToMap(list []string) map[string]struct{} {
@ -115,9 +119,18 @@ func listToMap(list []string) map[string]struct{} {
func mapToList(m map[string]string) []string {
list := []string{}
for k, _ := range m {
for k := range m {
list = append(list, strings.ToLower(k))
}
sort.Strings(list)
return list
}
var varRe = regexp.MustCompile(`\$([a-zA-Z0-9_]+)`)
func parsePresets(m map[string]string) map[string]string {
for k, v := range m {
m[k] = varRe.ReplaceAllString(v, `{{$1}}`)
}
return m
}
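
parsePresets rewrites $var references in preset values into the {{var}} template form consumed by the SQL renderer, leaving plain literals untouched. For example:

m := map[string]string{
	"user_id":    "$user_id",
	"updated_at": "now",
}
parsePresets(m)
// m["user_id"]    == "{{user_id}}"
// m["updated_at"] == "now" (unchanged)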


@ -4,6 +4,12 @@ package qcode
// FuzzerEntrypoint for Fuzzbuzz
func Fuzz(data []byte) int {
qt := GetQType(string(data))
if qt > QTUpsert {
panic("qt > QTUpsert")
}
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile(data, "user")
if err != nil {


@ -28,10 +28,10 @@ type Pos int
// item represents a token or text string returned from the scanner.
type item struct {
typ itemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
end Pos // The ending position, in bytes, of this item in the input string.
line uint16 // The line number at the start of this item.
_type itemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
end Pos // The ending position, in bytes, of this item in the input string.
line int16 // The line number at the start of this item.
}
// itemType identifies the type of lex items.
@ -87,7 +87,7 @@ type lexer struct {
width Pos // width of last rune read from input
items []item // array of scanned items
itemsA [50]item
line uint16 // 1+number of newlines seen
line int16 // 1+number of newlines seen
err error
}
@ -137,7 +137,7 @@ func (l *lexer) emit(t itemType) {
l.items = append(l.items, item{t, l.start, l.pos, l.line})
// Some items contain text internally. If so, count their newlines.
switch t {
case itemName:
case itemStringVal:
for i := l.start; i < l.pos; i++ {
if l.input[i] == '\n' {
l.line++
@ -147,13 +147,14 @@ func (l *lexer) emit(t itemType) {
l.start = l.pos
}
func (l *lexer) emitL(t itemType) {
s, e := l.current()
lowercase(l.input, s, e)
l.emit(t)
}
// ignore skips over the pending input before this point.
func (l *lexer) ignore() {
for i := l.start; i < l.pos; i++ {
if l.input[i] == '\n' {
l.line++
}
}
l.start = l.pos
}
@ -211,7 +212,7 @@ func lex(l *lexer, input []byte) error {
l.run()
if last := l.items[len(l.items)-1]; last.typ == itemError {
if last := l.items[len(l.items)-1]; last._type == itemError {
return l.err
}
return nil
@ -295,19 +296,17 @@ func lexName(l *lexer) stateFn {
l.backup()
s, e := l.current()
lowercase(l.input, s, e)
switch {
case equals(l.input, s, e, queryToken):
l.emit(itemQuery)
l.emitL(itemQuery)
case equals(l.input, s, e, mutationToken):
l.emit(itemMutation)
l.emitL(itemMutation)
case equals(l.input, s, e, subscriptionToken):
l.emit(itemSub)
l.emitL(itemSub)
case equals(l.input, s, e, trueToken):
l.emit(itemBoolVal)
l.emitL(itemBoolVal)
case equals(l.input, s, e, falseToken):
l.emit(itemBoolVal)
l.emitL(itemBoolVal)
default:
l.emit(itemName)
}
@ -432,10 +431,10 @@ func lowercase(b []byte, s Pos, e Pos) {
}
}
func (i *item) String() string {
func (i item) String() string {
var v string
switch i.typ {
switch i._type {
case itemEOF:
v = "EOF"
case itemError:
@ -461,7 +460,7 @@ func (i *item) String() string {
case itemStringVal:
v = "string"
}
return fmt.Sprintf("%s", v)
return v
}
/*


@ -26,13 +26,13 @@ const (
opQuery
opMutate
opSub
nodeStr
nodeInt
nodeFloat
nodeBool
nodeObj
nodeList
nodeVar
NodeStr
NodeInt
NodeFloat
NodeBool
NodeObj
NodeList
NodeVar
)
type Operation struct {
@ -85,7 +85,6 @@ type Parser struct {
input []byte // the string being scanned
pos int
items []item
depth int
err error
}
@ -148,32 +147,28 @@ func parseSelectionSet(gql []byte) (*Operation, error) {
if p.peek(itemObjOpen) {
p.ignore()
}
if p.peek(itemName) {
op = opPool.Get().(*Operation)
op.Reset()
op.Type = opQuery
op.Name = ""
op.Fields = op.fieldsA[:0]
op.Args = op.argsA[:0]
op.Fields, err = p.parseFields(op.Fields)
op, err = p.parseQueryOp()
} else {
op, err = p.parseOp()
if err != nil {
return nil, err
}
}
lexPool.Put(l)
if err != nil {
return nil, err
}
if p.peek(itemObjClose) {
p.ignore()
} else {
return nil, fmt.Errorf("operation missing closing '}'")
}
if !p.peek(itemEOF) {
p.ignore()
return nil, fmt.Errorf("invalid '%s' found after closing '}'", p.current())
}
lexPool.Put(l)
return op, err
}
@ -181,7 +176,7 @@ func (p *Parser) next() item {
n := p.pos + 1
if n >= len(p.items) {
p.err = errEOT
return item{typ: itemEOF}
return item{_type: itemEOF}
}
p.pos = n
return p.items[p.pos]
@ -196,25 +191,21 @@ func (p *Parser) ignore() {
p.pos = n
}
func (p *Parser) current() item {
return p.items[p.pos]
}
func (p *Parser) eof() bool {
n := p.pos + 1
return p.items[n].typ == itemEOF
func (p *Parser) current() string {
item := p.items[p.pos]
return b2s(p.input[item.pos:item.end])
}
func (p *Parser) peek(types ...itemType) bool {
n := p.pos + 1
if p.items[n].typ == itemEOF {
return false
}
// if p.items[n]._type == itemEOF {
// return false
// }
if n >= len(p.items) {
return false
}
for i := 0; i < len(types); i++ {
if p.items[n].typ == types[i] {
if p.items[n]._type == types[i] {
return true
}
}
@ -231,7 +222,7 @@ func (p *Parser) parseOp() (*Operation, error) {
op := opPool.Get().(*Operation)
op.Reset()
switch item.typ {
switch item._type {
case itemQuery:
op.Type = opQuery
case itemMutation:
@ -259,6 +250,37 @@ func (p *Parser) parseOp() (*Operation, error) {
if p.peek(itemObjOpen) {
p.ignore()
for n := 0; n < 10; n++ {
if !p.peek(itemName) {
break
}
op.Fields, err = p.parseFields(op.Fields)
if err != nil {
return nil, err
}
}
}
return op, nil
}
func (p *Parser) parseQueryOp() (*Operation, error) {
op := opPool.Get().(*Operation)
op.Reset()
op.Type = opQuery
op.Fields = op.fieldsA[:0]
op.Args = op.argsA[:0]
var err error
for n := 0; n < 10; n++ {
if !p.peek(itemName) {
break
}
op.Fields, err = p.parseFields(op.Fields)
if err != nil {
return nil, err
@ -282,11 +304,12 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
if st.Len() == 0 {
break
} else {
continue
}
continue
}
if p.peek(itemName) == false {
if !p.peek(itemName) {
return nil, errors.New("expecting an alias or field name")
}
@ -296,22 +319,22 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
f.Args = f.argsA[:0]
f.Children = f.childrenA[:0]
// Parse the inside of the field's () parentheses;
// in short, parse the args like id, where, etc.
if err := p.parseField(f); err != nil {
return nil, err
}
if f.ID != 0 {
intf := st.Peek()
pid, ok := intf.(int32)
if !ok {
return nil, fmt.Errorf("14: unexpected value %v (%t)", intf, intf)
}
intf := st.Peek()
if pid, ok := intf.(int32); ok {
f.ParentID = pid
fields[pid].Children = append(fields[pid].Children, f.ID)
} else {
f.ParentID = -1
}
// After the first opening curly bracket
// come the columns or child fields
if p.peek(itemObjOpen) {
p.ignore()
st.Push(f.ID)
@ -323,17 +346,19 @@ func (p *Parser) parseFields(fields []Field) ([]Field, error) {
func (p *Parser) parseField(f *Field) error {
var err error
f.Name = p.val(p.next())
v := p.next()
if p.peek(itemColon) {
p.ignore()
if p.peek(itemName) {
f.Alias = f.Name
f.Name = p.val(p.next())
f.Alias = p.val(v)
f.Name = p.vall(p.next())
} else {
return errors.New("expecting an aliased field name")
}
} else {
f.Name = p.vall(v)
}
if p.peek(itemArgsOpen) {
@ -358,13 +383,13 @@ func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
p.ignore()
break
}
if p.peek(itemName) == false {
if !p.peek(itemName) {
return nil, errors.New("expecting an argument name")
}
args = append(args, Arg{Name: p.val(p.next())})
arg := &args[(len(args) - 1)]
if p.peek(itemColon) == false {
if !p.peek(itemColon) {
return nil, errors.New("missing ':' after argument name")
}
p.ignore()
@ -407,7 +432,7 @@ func (p *Parser) parseList() (*Node, error) {
return nil, errors.New("List cannot be empty")
}
parent.Type = nodeList
parent.Type = NodeList
parent.Children = nodes
return parent, nil
@ -425,12 +450,12 @@ func (p *Parser) parseObj() (*Node, error) {
break
}
if p.peek(itemName) == false {
if !p.peek(itemName) {
return nil, errors.New("expecting an argument name")
}
nodeName := p.val(p.next())
if p.peek(itemColon) == false {
if !p.peek(itemColon) {
return nil, errors.New("missing ':' after Field argument name")
}
p.ignore()
@ -444,7 +469,7 @@ func (p *Parser) parseObj() (*Node, error) {
nodes = append(nodes, node)
}
parent.Type = nodeObj
parent.Type = NodeObj
parent.Children = nodes
return parent, nil
@ -465,19 +490,19 @@ func (p *Parser) parseValue() (*Node, error) {
node := nodePool.Get().(*Node)
node.Reset()
switch item.typ {
switch item._type {
case itemIntVal:
node.Type = nodeInt
node.Type = NodeInt
case itemFloatVal:
node.Type = nodeFloat
node.Type = NodeFloat
case itemStringVal:
node.Type = nodeStr
node.Type = NodeStr
case itemBoolVal:
node.Type = nodeBool
node.Type = NodeBool
case itemName:
node.Type = nodeStr
node.Type = NodeStr
case itemVariable:
node.Type = nodeVar
node.Type = NodeVar
default:
return nil, fmt.Errorf("expecting a number, string, object, list or variable as an argument value (not %s)", p.val(p.next()))
}
@ -490,6 +515,11 @@ func (p *Parser) val(v item) string {
return b2s(p.input[v.pos:v.end])
}
func (p *Parser) vall(v item) string {
lowercase(p.input, v.pos, v.end)
return b2s(p.input[v.pos:v.end])
}
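// b2s reinterprets the byte slice as a string without copying (unsafe);
// the backing bytes must not be mutated after conversion. Note vall
// lowercases in place *before* converting, which is why it is safe.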
func b2s(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
@ -508,19 +538,19 @@ func (t parserType) String() string {
v = "mutation"
case opSub:
v = "subscription"
case nodeStr:
case NodeStr:
v = "node-string"
case nodeInt:
case NodeInt:
v = "node-int"
case nodeFloat:
case NodeFloat:
v = "node-float"
case nodeBool:
case NodeBool:
v = "node-bool"
case nodeVar:
case NodeVar:
v = "node-var"
case nodeObj:
case NodeObj:
v = "node-obj"
case nodeList:
case NodeList:
v = "node-list"
}
return fmt.Sprintf("<%s>", v)


@ -7,17 +7,20 @@ import (
func TestCompile1(t *testing.T) {
qc, _ := NewCompiler(Config{})
qc.AddRole("user", "product", TRConfig{
err := qc.AddRole("user", "product", TRConfig{
Query: QueryConfig{
Columns: []string{"id", "Name"},
},
})
if err != nil {
t.Error(err)
}
_, err := qc.Compile([]byte(`
product(id: 15) {
_, err = qc.Compile([]byte(`
query { product(id: 15) {
id
name
}`), "user")
} }`), "user")
if err != nil {
t.Fatal(err)
@ -26,13 +29,16 @@ func TestCompile1(t *testing.T) {
func TestCompile2(t *testing.T) {
qc, _ := NewCompiler(Config{})
qc.AddRole("user", "product", TRConfig{
err := qc.AddRole("user", "product", TRConfig{
Query: QueryConfig{
Columns: []string{"ID"},
},
})
if err != nil {
t.Error(err)
}
_, err := qc.Compile([]byte(`
_, err = qc.Compile([]byte(`
query { product(id: 15) {
id
name
@ -45,13 +51,16 @@ func TestCompile2(t *testing.T) {
func TestCompile3(t *testing.T) {
qc, _ := NewCompiler(Config{})
qc.AddRole("user", "product", TRConfig{
err := qc.AddRole("user", "product", TRConfig{
Query: QueryConfig{
Columns: []string{"ID"},
},
})
if err != nil {
t.Error(err)
}
_, err := qc.Compile([]byte(`
_, err = qc.Compile([]byte(`
mutation {
product(id: 15, name: "Test") {
id
@ -91,6 +100,35 @@ func TestEmptyCompile(t *testing.T) {
}
}
func TestInvalidPostfixCompile(t *testing.T) {
gql := `mutation
updateThread {
thread(update: $data, where: { slug: { eq: $slug } }) {
slug
title
published
createdAt : created_at
totalVotes : cached_votes_total
totalPosts : cached_posts_total
vote : thread_vote(where: { user_id: { eq: $user_id } }) {
id
}
topics {
slug
name
}
}
}
}`
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile([]byte(gql), "anon")
if err == nil {
t.Fatal(errors.New("expecting an error"))
}
}
var gql = []byte(`
products(
# returns only 30 items


@ -20,6 +20,7 @@ const (
const (
QTQuery QType = iota + 1
QTMutation
QTInsert
QTUpdate
QTDelete
@ -30,13 +31,15 @@ type QCode struct {
Type QType
ActionVar string
Selects []Select
Roots []int32
rootsA [5]int32
}
type Select struct {
ID int32
ParentID int32
Args map[string]*Node
Table string
Name string
FieldName string
Cols []Column
Where *Exp
@ -48,6 +51,7 @@ type Select struct {
Allowed map[string]struct{}
PresetMap map[string]string
PresetList []string
SkipRender bool
}
type Column struct {
@ -154,7 +158,6 @@ const (
type Compiler struct {
tr map[string]map[string]*trval
bl map[string]struct{}
ka bool
}
var expPool = sync.Pool{
@ -162,7 +165,7 @@ var expPool = sync.Pool{
}
func NewCompiler(c Config) (*Compiler, error) {
co := &Compiler{ka: c.KeepArgs}
co := &Compiler{}
co.tr = make(map[string]map[string]*trval)
co.bl = make(map[string]struct{}, len(c.Blocklist))
@ -185,7 +188,7 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
trv := &trval{}
// query config
trv.query.fil, err = compileFilter(trc.Query.Filters)
trv.query.fil, trv.query.filNU, err = compileFilter(trc.Query.Filters)
if err != nil {
return err
}
@ -196,23 +199,26 @@ func (com *Compiler) AddRole(role, table string, trc TRConfig) error {
trv.query.disable.funcs = trc.Query.DisableFunctions
// insert config
if trv.insert.fil, err = compileFilter(trc.Insert.Filters); err != nil {
trv.insert.fil, trv.insert.filNU, err = compileFilter(trc.Insert.Filters)
if err != nil {
return err
}
trv.insert.cols = listToMap(trc.Insert.Columns)
trv.insert.psmap = trc.Insert.Presets
trv.insert.psmap = parsePresets(trc.Insert.Presets)
trv.insert.pslist = mapToList(trv.insert.psmap)
// update config
if trv.update.fil, err = compileFilter(trc.Update.Filters); err != nil {
trv.update.fil, trv.update.filNU, err = compileFilter(trc.Update.Filters)
if err != nil {
return err
}
trv.update.cols = listToMap(trc.Update.Columns)
trv.update.psmap = trc.Update.Presets
trv.update.psmap = parsePresets(trc.Update.Presets)
trv.update.pslist = mapToList(trv.update.psmap)
// delete config
if trv.delete.fil, err = compileFilter(trc.Delete.Filters); err != nil {
trv.delete.fil, trv.delete.filNU, err = compileFilter(trc.Delete.Filters)
if err != nil {
return err
}
trv.delete.cols = listToMap(trc.Delete.Columns)
@ -233,6 +239,7 @@ func (com *Compiler) Compile(query []byte, role string) (*QCode, error) {
var err error
qc := QCode{Type: QTQuery}
qc.Roots = qc.rootsA[:0]
op, err := Parse(query)
if err != nil {
@ -250,7 +257,6 @@ func (com *Compiler) Compile(query []byte, role string) (*QCode, error) {
func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
id := int32(0)
parentID := int32(0)
if len(op.Fields) == 0 {
return errors.New("invalid graphql no query found")
@ -269,7 +275,13 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
if len(op.Fields) == 0 {
return errors.New("empty query")
}
st.Push(op.Fields[0].ID)
for i := range op.Fields {
if op.Fields[i].ParentID == -1 {
val := op.Fields[i].ID | (-1 << 16)
st.Push(val)
}
}
for {
if st.Len() == 0 {
@ -280,19 +292,26 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
return fmt.Errorf("selector limit reached (%d)", maxSelectors)
}
fid := st.Pop()
val := st.Pop()
fid := val & 0xFFFF
parentID := (val >> 16) & 0xFFFF
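// The stack now carries (parentID << 16) | fieldID packed into one
// int32; e.g. field 3 under parent 1 travels as 0x00010003. Root
// fields are pushed with all-ones in the high bits, and the
// field.ParentID == -1 check below restores parentID to -1.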
field := &op.Fields[fid]
if _, ok := com.bl[field.Name]; ok {
continue
}
if field.ParentID == -1 {
parentID = -1
}
trv := com.getRole(role, field.Name)
selects = append(selects, Select{
ID: id,
ParentID: parentID,
Table: field.Name,
Name: field.Name,
Children: make([]int32, 0, 5),
Allowed: trv.allowedColumns(action),
Functions: true,
@ -313,22 +332,27 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
s.PresetList = trv.update.pslist
}
if s.ID != 0 {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}
if len(field.Alias) != 0 {
s.FieldName = field.Alias
} else {
s.FieldName = s.Table
s.FieldName = s.Name
}
err := com.compileArgs(qc, s, field.Args)
err := com.compileArgs(qc, s, field.Args, role)
if err != nil {
return err
}
// Order is important: addFilters must come after compileArgs
com.addFilters(qc, s, role)
if s.ParentID == -1 {
qc.Roots = append(qc.Roots, s.ID)
} else {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}
s.Cols = make([]Column, 0, len(field.Children))
action = QTQuery
@ -340,8 +364,8 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
}
if len(f.Children) != 0 {
parentID = s.ID
st.Push(f.ID)
val := f.ID | (s.ID << 16)
st.Push(val)
continue
}
@ -362,74 +386,92 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
return errors.New("invalid query")
}
var fil *Exp
root := &selects[0]
if trv, ok := com.tr[role][op.Fields[0].Name]; ok {
fil = trv.filter(qc.Type)
}
if fil != nil {
switch fil.Op {
case OpNop:
case OpFalse:
root.Where = fil
default:
if root.Where != nil {
ow := root.Where
root.Where = expPool.Get().(*Exp)
root.Where.Reset()
root.Where.Op = OpAnd
root.Where.Children = root.Where.childrenA[:2]
root.Where.Children[0] = fil
root.Where.Children[1] = ow
} else {
root.Where = fil
}
}
}
qc.Selects = selects[:id]
return nil
}
func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg) error {
var err error
func (com *Compiler) addFilters(qc *QCode, sel *Select, role string) {
var fil *Exp
var nu bool
if com.ka {
sel.Args = make(map[string]*Node, len(args))
if trv, ok := com.tr[role][sel.Name]; ok {
fil, nu = trv.filter(qc.Type)
} else if role == "anon" {
// Tables not defined under the anon role will not be rendered
sel.SkipRender = true
return
} else {
return
}
if fil == nil {
return
}
if nu && role == "anon" {
sel.SkipRender = true
}
switch fil.Op {
case OpNop:
case OpFalse:
sel.Where = fil
default:
if sel.Where != nil {
ow := sel.Where
sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = fil
sel.Where.Children[1] = ow
} else {
sel.Where = fil
}
}
}
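
Tying this together, a role whose filter references $user_id now yields SkipRender for anon instead of an error. A hypothetical role setup (assuming Filters takes GraphQL-style filter strings, which the compileFilter call sites suggest but this diff does not show):

qc, _ := NewCompiler(Config{})
err := qc.AddRole("user", "product", TRConfig{
	Query: QueryConfig{
		Columns: []string{"id", "name"},
		Filters: []string{`{ user_id: { eq: $user_id } }`},
	},
})
if err != nil {
	log.Fatal(err)
}
// Selects compiled with role "anon" against product would set
// SkipRender; role "user" gets the filter AND-ed into its WHERE.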
func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg, role string) error {
var err error
var ka bool
for i := range args {
arg := &args[i]
switch arg.Name {
case "id":
err = com.compileArgID(sel, arg)
err, ka = com.compileArgID(sel, arg)
case "search":
err = com.compileArgSearch(sel, arg)
err, ka = com.compileArgSearch(sel, arg)
case "where":
err = com.compileArgWhere(sel, arg)
err, ka = com.compileArgWhere(sel, arg, role)
case "orderby", "order_by", "order":
err = com.compileArgOrderBy(sel, arg)
err, ka = com.compileArgOrderBy(sel, arg)
case "distinct_on", "distinct":
err = com.compileArgDistinctOn(sel, arg)
err, ka = com.compileArgDistinctOn(sel, arg)
case "limit":
err = com.compileArgLimit(sel, arg)
err, ka = com.compileArgLimit(sel, arg)
case "offset":
err = com.compileArgOffset(sel, arg)
err, ka = com.compileArgOffset(sel, arg)
}
if !ka {
nodePool.Put(arg.Val)
}
if err != nil {
return err
}
if sel.Args != nil {
sel.Args[arg.Name] = arg.Val
} else {
nodePool.Put(arg.Val)
}
}
return nil
@ -437,7 +479,7 @@ func (com *Compiler) compileArgs(qc *QCode, sel *Select, args []Arg) error {
func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
setActionVar := func(arg *Arg) error {
if arg.Val.Type != nodeVar {
if arg.Val.Type != NodeVar {
return fmt.Errorf("value for argument '%s' must be a variable", arg.Name)
}
qc.ActionVar = arg.Val.Val
@ -460,7 +502,7 @@ func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
case "delete":
qc.Type = QTDelete
if arg.Val.Type != nodeBool {
if arg.Val.Type != NodeBool {
return fmt.Errorf("value for argument '%s' must be a boolean", arg.Name)
}
@ -474,19 +516,20 @@ func (com *Compiler) setMutationType(qc *QCode, args []Arg) error {
return nil
}
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, error) {
if arg.Val.Type != nodeObj {
return nil, fmt.Errorf("expecting an object")
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, bool, error) {
if arg.Val.Type != NodeObj {
return nil, false, fmt.Errorf("expecting an object")
}
return com.compileArgNode(st, arg.Val, true)
}
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, bool, error) {
var root *Exp
var needsUser bool
if node == nil || len(node.Children) == 0 {
return nil, errors.New("invalid argument value")
return nil, needsUser, errors.New("invalid argument value")
}
pushChild(st, nil, node)
@ -499,7 +542,7 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
intf := st.Pop()
node, ok := intf.(*Node)
if !ok || node == nil {
return nil, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
return nil, needsUser, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
}
// Objects inside a list
@ -515,23 +558,22 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
ex, err := newExp(st, node, usePool)
if err != nil {
return nil, err
return nil, needsUser, err
}
if ex == nil {
continue
}
if ex.Type == ValVar && ex.Val == "user_id" {
needsUser = true
}
if node.exp == nil {
root = ex
} else {
node.exp.Children = append(node.exp.Children, ex)
}
}
if com.ka {
return root, nil
}
pushChild(st, nil, node)
@ -549,16 +591,16 @@ func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*
nodePool.Put(node)
}
return root, nil
return root, needsUser, nil
}
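
compileArgNode now threads a needsUser flag back to its callers, set whenever the compiled expression references the $user_id variable. Downstream (see compileArgWhere below) this is what lets the anon role skip selectors that can never match without an authenticated user. A condensed sketch of the contract (the surrounding wiring is illustrative):

ex, needsUser, err := com.compileArgNode(st, node, true)
if err != nil {
	return err, false
}
if needsUser && role == "anon" {
	sel.SkipRender = true // selector is rendered as empty for anon
}
_ = ex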
func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgID(sel *Select, arg *Arg) (error, bool) {
if sel.ID != 0 {
return nil
return nil, false
}
if sel.Where != nil && sel.Where.Op == OpEqID {
return nil
return nil, false
}
ex := expPool.Get().(*Exp)
@ -568,30 +610,41 @@ func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
ex.Val = arg.Val.Val
switch arg.Val.Type {
case nodeStr:
case NodeStr:
ex.Type = ValStr
case nodeInt:
case NodeInt:
ex.Type = ValInt
case nodeFloat:
case NodeFloat:
ex.Type = ValFloat
case nodeVar:
case NodeVar:
ex.Type = ValVar
default:
return fmt.Errorf("expecting a string, int, float or variable")
return fmt.Errorf("expecting a string, int, float or variable"), false
}
sel.Where = ex
return nil
return nil, false
}
func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) (error, bool) {
ex := expPool.Get().(*Exp)
ex.Reset()
ex.Op = OpTsQuery
ex.Type = ValStr
ex.Val = arg.Val.Val
if arg.Val.Type == NodeVar {
ex.Type = ValVar
} else {
ex.Type = ValStr
}
if sel.Args == nil {
sel.Args = make(map[string]*Node)
}
sel.Args[arg.Name] = arg.Val
if sel.Where != nil {
ow := sel.Where
@ -604,16 +657,20 @@ func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
} else {
sel.Where = ex
}
return nil
return nil, true
}
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgWhere(sel *Select, arg *Arg, role string) (error, bool) {
st := util.NewStack()
var err error
ex, err := com.compileArgObj(st, arg)
ex, nu, err := com.compileArgObj(st, arg)
if err != nil {
return err
return err, false
}
if nu && role == "anon" {
sel.SkipRender = true
}
if sel.Where != nil {
@ -629,12 +686,12 @@ func (com *Compiler) compileArgWhere(sel *Select, arg *Arg) error {
sel.Where = ex
}
return nil
return nil, false
}
func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
if arg.Val.Type != nodeObj {
return fmt.Errorf("expecting an object")
func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) (error, bool) {
if arg.Val.Type != NodeObj {
return fmt.Errorf("expecting an object"), false
}
st := util.NewStack()
@ -652,23 +709,19 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
node, ok := intf.(*Node)
if !ok || node == nil {
return fmt.Errorf("17: unexpected value %v (%t)", intf, intf)
return fmt.Errorf("17: unexpected value %v (%t)", intf, intf), false
}
if _, ok := com.bl[node.Name]; ok {
if !com.ka {
nodePool.Put(node)
}
nodePool.Put(node)
continue
}
if node.Type == nodeObj {
if node.Type == NodeObj {
for i := range node.Children {
st.Push(node.Children[i])
}
if !com.ka {
nodePool.Put(node)
}
nodePool.Put(node)
continue
}
@ -688,65 +741,60 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
case "desc_nulls_last":
ob.Order = OrderDescNullsLast
default:
return fmt.Errorf("valid values include asc, desc, asc_nulls_first and desc_nulls_first")
return fmt.Errorf("valid values include asc, desc, asc_nulls_first and desc_nulls_first"), false
}
setOrderByColName(ob, node)
sel.OrderBy = append(sel.OrderBy, ob)
if !com.ka {
nodePool.Put(node)
}
nodePool.Put(node)
}
return nil
return nil, false
}
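
For reference, an argument such as order_by: { created_at: desc } drives the switch above: the leaf value picks the direction and setOrderByColName fills in the column. Roughly (a sketch; the OrderBy field names are assumed from their usage here):

// order_by: { created_at: desc } compiles to:
ob := &OrderBy{Col: "created_at", Order: OrderDesc}
sel.OrderBy = append(sel.OrderBy, ob)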
func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) (error, bool) {
node := arg.Val
if _, ok := com.bl[node.Name]; ok {
return nil
return nil, false
}
if node.Type != nodeList && node.Type != nodeStr {
return fmt.Errorf("expecting a list of strings or just a string")
if node.Type != NodeList && node.Type != NodeStr {
return fmt.Errorf("expecting a list of strings or just a string"), false
}
if node.Type == nodeStr {
if node.Type == NodeStr {
sel.DistinctOn = append(sel.DistinctOn, node.Val)
}
for i := range node.Children {
sel.DistinctOn = append(sel.DistinctOn, node.Children[i].Val)
if !com.ka {
nodePool.Put(node.Children[i])
}
nodePool.Put(node.Children[i])
}
return nil
return nil, false
}
func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgLimit(sel *Select, arg *Arg) (error, bool) {
node := arg.Val
if node.Type != nodeInt {
return fmt.Errorf("expecting an integer")
if node.Type != NodeInt {
return fmt.Errorf("expecting an integer"), false
}
sel.Paging.Limit = node.Val
return nil
return nil, false
}
func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) (error, bool) {
node := arg.Val
if node.Type != nodeInt {
return fmt.Errorf("expecting an integer")
if node.Type != NodeInt {
return fmt.Errorf("expecting an integer"), false
}
sel.Paging.Offset = node.Val
return nil
return nil, false
}
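
All of the compileArg* helpers now return (error, bool): the bool tells compileArgs whether the argument's parsed node tree was kept alive (for example, stored in sel.Args) and therefore must not be recycled. A sketch of the calling contract, matching the loop at the top of this file:

err, keepAlive := com.compileArgLimit(sel, arg)
if !keepAlive {
	// The node tree was fully consumed; return it to the pool for reuse.
	nodePool.Put(arg.Val)
}
if err != nil {
	return err
}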
var zeroTrv = &trval{}
@ -861,17 +909,17 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
if ex.Op != OpAnd && ex.Op != OpOr && ex.Op != OpNot {
switch node.Type {
case nodeStr:
case NodeStr:
ex.Type = ValStr
case nodeInt:
case NodeInt:
ex.Type = ValInt
case nodeBool:
case NodeBool:
ex.Type = ValBool
case nodeFloat:
case NodeFloat:
ex.Type = ValFloat
case nodeList:
case NodeList:
ex.Type = ValList
case nodeVar:
case NodeVar:
ex.Type = ValVar
default:
return nil, fmt.Errorf("[Where] valid values include string, int, float, boolean and list: %s", node.Type)
@ -885,13 +933,13 @@ func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
func setListVal(ex *Exp, node *Node) {
if len(node.Children) != 0 {
switch node.Children[0].Type {
case nodeStr:
case NodeStr:
ex.ListType = ValStr
case nodeInt:
case NodeInt:
ex.ListType = ValInt
case nodeBool:
case NodeBool:
ex.ListType = ValBool
case nodeFloat:
case NodeFloat:
ex.ListType = ValFloat
}
}
@ -904,7 +952,7 @@ func setWhereColName(ex *Exp, node *Node) {
var list []string
for n := node.Parent; n != nil; n = n.Parent {
if n.Type != nodeObj {
if n.Type != NodeObj {
continue
}
if len(n.Name) != 0 {
@ -916,10 +964,13 @@ func setWhereColName(ex *Exp, node *Node) {
list = append([]string{k}, list...)
}
}
- if len(list) == 1 {
+ listlen := len(list)
+ if listlen == 1 {
ex.Col = list[0]
- } else if len(list) > 1 {
- ex.NestedCols = list
+ } else if listlen > 1 {
+ ex.Col = list[listlen-1]
+ ex.NestedCols = list[:listlen]
}
}
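
With the change above, a nested filter such as where: { owner: { id: { eq: 1 } } } records both the leaf column and the full path. Walking the parents produces list == []string{"owner", "id"}, so:

// where: { owner: { id: { eq: 1 } } }
// ex.Col        == "id"                     // the leaf column
// ex.NestedCols == []string{"owner", "id"}  // full path, used to build the nested join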
@ -949,28 +1000,38 @@ func pushChild(st *util.Stack, exp *Exp, node *Node) {
}
func compileFilter(filter []string) (*Exp, error) {
func compileFilter(filter []string) (*Exp, bool, error) {
var fl *Exp
var needsUser bool
com := &Compiler{}
st := util.NewStack()
if len(filter) == 0 {
return &Exp{Op: OpNop, doFree: false}, nil
return &Exp{Op: OpNop, doFree: false}, false, nil
}
for i := range filter {
if filter[i] == "false" {
return &Exp{Op: OpFalse, doFree: false}, nil
return &Exp{Op: OpFalse, doFree: false}, false, nil
}
node, err := ParseArgValue(filter[i])
if err != nil {
return nil, err
return nil, false, err
}
f, err := com.compileArgNode(st, node, false)
f, nu, err := com.compileArgNode(st, node, false)
if err != nil {
return nil, err
return nil, false, err
}
if nu {
needsUser = true
}
// TODO: Invalid table names in a nested where clause fail silently,
// returning a nil 'f'; this needs to be fixed
// TODO: Invalid where clauses, such as a missing op (eg. eq), also fail silently
if fl == nil {
fl = f
@ -978,7 +1039,7 @@ func compileFilter(filter []string) (*Exp, error) {
fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}, doFree: false}
}
}
return fl, nil
return fl, needsUser, nil
}
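
A sketch of how role filters from the config are compiled through the signature above; the bool reports whether any filter references $user_id:

fil, needsUser, err := compileFilter([]string{
	"{ user_id: { eq: $user_id } }",
})
if err != nil {
	panic(err) // sketch only
}
// needsUser == true here: anon-role queries against this table can be
// skipped before any SQL is generated.
_ = fil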
func buildPath(a []string) string {
@ -1065,7 +1126,6 @@ func (t ExpOp) String() string {
}
func FreeExp(ex *Exp) {
// fmt.Println(">", ex.doFree)
if ex.doFree {
expPool.Put(ex)
}

qcode/utils.go (new file, 23 lines)

@ -0,0 +1,23 @@
package qcode
func GetQType(gql string) QType {
for i := range gql {
b := gql[i]
if b == '{' {
return QTQuery
}
if al(b) {
switch b {
case 'm', 'M':
return QTMutation
case 'q', 'Q':
return QTQuery
}
}
}
return -1
}
func al(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
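
GetQType only inspects the first significant byte, so it stays allocation-free; shorthand queries that start with '{' are treated as queries. For example:

GetQType(`mutation { users(insert: $data) { id } }`) // QTMutation
GetQType(`query { users { id } }`)                   // QTQuery
GetQType(`{ users { id } }`)                         // QTQuery (shorthand form)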

serv/actions.go (new file, 41 lines)

@ -0,0 +1,41 @@
package serv
import (
"fmt"
"net/http"
)
type actionFn func(w http.ResponseWriter, r *http.Request) error
func newAction(a configAction) (http.Handler, error) {
var fn actionFn
var err error
if len(a.SQL) != 0 {
fn, err = newSQLAction(a)
} else {
return nil, fmt.Errorf("invalid config for action '%s'", a.Name)
}
if err != nil {
return nil, err
}
httpFn := func(w http.ResponseWriter, r *http.Request) {
if err := fn(w, r); err != nil {
errlog.Error().Err(err).Send()
errorResp(w, err)
}
}
return http.HandlerFunc(httpFn), nil
}
func newSQLAction(a configAction) (actionFn, error) {
fn := func(w http.ResponseWriter, r *http.Request) error {
_, err := db.Exec(r.Context(), a.SQL)
return err
}
return fn, nil
}
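
An action is thus just a named SQL statement exposed over HTTP. A hedged sketch of how these handlers might be mounted (the mux and the route prefix are illustrative, not taken from this change):

for _, a := range conf.Actions {
	h, err := newAction(a)
	if err != nil {
		errlog.Fatal().Err(err).Send()
	}
	// Hypothetical route; the real prefix is defined in the router setup.
	mux.Handle("/api/v1/actions/"+a.Name, h)
}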


@ -1,276 +0,0 @@
package serv
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strings"
)
const (
AL_QUERY int = iota + 1
AL_VARS
)
type allowItem struct {
uri string
gql string
vars json.RawMessage
}
var _allowList allowList
type allowList struct {
list []*allowItem
index map[string]int
filepath string
saveChan chan *allowItem
active bool
}
func initAllowList(cpath string) {
_allowList = allowList{
index: make(map[string]int),
saveChan: make(chan *allowItem),
active: true,
}
if len(cpath) != 0 {
fp := path.Join(cpath, "allow.list")
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
logger.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
logger.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
fp := "./config/allow.list"
if _, err := os.Stat(fp); err == nil {
_allowList.filepath = fp
} else if !os.IsNotExist(err) {
logger.Fatal().Err(err).Send()
}
}
if len(_allowList.filepath) == 0 {
if conf.Production {
logger.Fatal().Msg("allow.list not found")
}
if len(cpath) == 0 {
_allowList.filepath = "./config/allow.list"
} else {
_allowList.filepath = path.Join(cpath, "allow.list")
}
logger.Warn().Msg("allow.list not found")
} else {
_allowList.load()
}
go func() {
for v := range _allowList.saveChan {
_allowList.save(v)
}
}()
}
func (al *allowList) add(req *gqlReq) {
if al.active == false || len(req.ref) == 0 || len(req.Query) == 0 {
return
}
var query string
for i := 0; i < len(req.Query); i++ {
c := req.Query[i]
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' {
query = req.Query
break
} else if c == '{' {
query = "query " + req.Query
break
}
}
al.saveChan <- &allowItem{
uri: req.ref,
gql: query,
vars: req.Vars,
}
}
func (al *allowList) load() {
if al.active == false {
return
}
b, err := ioutil.ReadFile(al.filepath)
if err != nil {
log.Fatal(err)
}
if len(b) == 0 {
return
}
var uri string
var varBytes []byte
s, e, c := 0, 0, 0
ty := 0
for {
if c == 0 && b[e] == '#' {
s = e
for e < len(b) && b[e] != '\n' {
e++
}
if (e - s) > 2 {
uri = strings.TrimSpace(string(b[(s + 1):e]))
}
}
if e >= len(b) {
break
}
if matchPrefix(b, e, "query") || matchPrefix(b, e, "mutation") {
if c == 0 {
s = e
}
ty = AL_QUERY
} else if matchPrefix(b, e, "variables") {
if c == 0 {
s = e + len("variables") + 1
}
ty = AL_VARS
} else if b[e] == '{' {
c++
} else if b[e] == '}' {
c--
if c == 0 {
if ty == AL_QUERY {
q := string(b[s:(e + 1)])
key := gqlHash(q, varBytes, "")
if idx, ok := al.index[key]; !ok {
al.list = append(al.list, &allowItem{
uri: uri,
gql: q,
vars: varBytes,
})
al.index[key] = len(al.list) - 1
} else {
item := al.list[idx]
item.gql = q
item.vars = varBytes
}
varBytes = nil
} else if ty == AL_VARS {
varBytes = b[s:(e + 1)]
}
ty = 0
}
}
e++
if e >= len(b) {
break
}
}
}
func (al *allowList) save(item *allowItem) {
if al.active == false {
return
}
key := gqlHash(item.gql, item.vars, "")
if _, ok := al.index[key]; ok {
return
}
al.list = append(al.list, item)
al.index[key] = len(al.list) - 1
f, err := os.Create(al.filepath)
if err != nil {
logger.Warn().Err(err).Msgf("Failed to write allow list: %s", al.filepath)
return
}
defer f.Close()
keys := []string{}
urlMap := make(map[string][]*allowItem)
for _, v := range al.list {
urlMap[v.uri] = append(urlMap[v.uri], v)
}
for k := range urlMap {
keys = append(keys, k)
}
sort.Strings(keys)
for i := range keys {
k := keys[i]
v := urlMap[k]
f.WriteString(fmt.Sprintf("# %s\n\n", k))
for i := range v {
if len(v[i].vars) != 0 && bytes.Equal(v[i].vars, []byte("{}")) == false {
vj, err := json.MarshalIndent(v[i].vars, "", "\t")
if err != nil {
logger.Warn().Err(err).Msg("Failed to write allow list 'vars' to file")
continue
}
f.WriteString(fmt.Sprintf("variables %s\n\n", vj))
}
if v[i].gql[0] == '{' {
f.WriteString(fmt.Sprintf("query %s\n\n", v[i].gql))
} else {
f.WriteString(fmt.Sprintf("%s\n\n", v[i].gql))
}
}
}
}
func matchPrefix(b []byte, i int, s string) bool {
if (len(b) - i) < len(s) {
return false
}
for n := 0; n < len(s); n++ {
if b[(i+n)] != s[n] {
return false
}
}
return true
}


@ -2,73 +2,62 @@ package serv
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/jsn"
)
func argMap(ctx *coreContext) func(w io.Writer, tag string) (int, error) {
func argMap(ctx context.Context, vars []byte) func(w io.Writer, tag string) (int, error) {
return func(w io.Writer, tag string) (int, error) {
switch tag {
case "user_id_provider":
if v := ctx.Value(userIDProviderKey); v != nil {
return stringArg(w, v.(string))
return io.WriteString(w, v.(string))
}
io.WriteString(w, "null")
return 0, nil
return 0, errors.New("query requires variable $user_id_provider")
case "user_id":
if v := ctx.Value(userIDKey); v != nil {
return stringArg(w, v.(string))
return io.WriteString(w, v.(string))
}
io.WriteString(w, "null")
return 0, nil
return 0, errors.New("query requires variable $user_id")
case "user_role":
if v := ctx.Value(userRoleKey); v != nil {
return stringArg(w, v.(string))
return io.WriteString(w, v.(string))
}
io.WriteString(w, "null")
return 0, errors.New("query requires variable $user_role")
}
fields := jsn.Get(vars, [][]byte{[]byte(tag)})
if len(fields) == 0 {
return 0, nil
}
fields := jsn.Get(ctx.req.Vars, [][]byte{[]byte(tag)})
if len(fields) == 0 {
return 0, fmt.Errorf("variable '%s' not found", tag)
v := fields[0].Value
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
fields[0].Value = v[1 : len(v)-1]
}
is := false
for i := range fields[0].Value {
c := fields[0].Value[i]
if c != ' ' {
is = (c == '"') || (c == '{') || (c == '[')
break
}
}
if is {
return stringArgB(w, fields[0].Value)
}
w.Write(fields[0].Value)
return 0, nil
return w.Write(escQuote(fields[0].Value))
}
}
func argList(ctx *coreContext, args [][]byte) []interface{} {
func argList(ctx *coreContext, args [][]byte) ([]interface{}, error) {
vars := make([]interface{}, len(args))
var fields map[string]interface{}
var fields map[string]json.RawMessage
var err error
if len(ctx.req.Vars) != 0 {
fields, _, err = jsn.Tree(ctx.req.Vars)
if err != nil {
logger.Warn().Err(err).Msg("Failed to parse variables")
return nil, err
}
}
@ -79,44 +68,70 @@ func argList(ctx *coreContext, args [][]byte) []interface{} {
case bytes.Equal(av, []byte("user_id")):
if v := ctx.Value(userIDKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_id")
}
case bytes.Equal(av, []byte("user_id_provider")):
if v := ctx.Value(userIDProviderKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_id_provider")
}
case bytes.Equal(av, []byte("user_role")):
if v := ctx.Value(userRoleKey); v != nil {
vars[i] = v.(string)
} else {
return nil, errors.New("query requires variable $user_role")
}
default:
if v, ok := fields[string(av)]; ok {
vars[i] = v
switch v[0] {
case '[', '{':
vars[i] = escQuote(v)
default:
var val interface{}
if err := json.Unmarshal(v, &val); err != nil {
return nil, err
}
vars[i] = val
}
} else {
return nil, fmt.Errorf("query requires variable $%s", string(av))
}
}
}
return vars
return vars, nil
}
func stringArg(w io.Writer, v string) (int, error) {
if n, err := w.Write([]byte(`'`)); err != nil {
return n, err
func escQuote(b []byte) []byte {
f := false
for i := range b {
if b[i] == '\'' {
f = true
break
}
}
if n, err := w.Write([]byte(v)); err != nil {
return n, err
if !f {
return b
}
return w.Write([]byte(`'`))
}
func stringArgB(w io.Writer, v []byte) (int, error) {
if n, err := w.Write([]byte(`'`)); err != nil {
return n, err
buf := &bytes.Buffer{}
s := 0
for i := range b {
if b[i] == '\'' {
buf.Write(b[s:i])
buf.WriteString(`''`)
s = i + 1
}
}
if n, err := w.Write(v); err != nil {
return n, err
l := len(b)
if s < l {
buf.Write(b[s:l])
}
return w.Write([]byte(`'`))
return buf.Bytes()
}
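
escQuote doubles single quotes, the standard escaping rule for string literals in Postgres, and returns the input unchanged on the fast path when no quote is present. For example:

escQuote([]byte(`O'Reilly`))   // => O''Reilly
escQuote([]byte(`{"a": "b"}`)) // => {"a": "b"} (no quote, returned as-is)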


@ -3,16 +3,17 @@ package serv
import (
"context"
"net/http"
"strings"
)
var (
userIDProviderKey = "user_id_provider"
userIDKey = "user_id"
userRoleKey = "user_role"
type ctxkey int
const (
userIDProviderKey ctxkey = iota
userIDKey
userRoleKey
)
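
Moving from string keys to an unexported ctxkey type prevents collisions with other packages storing values in the same context, since context.Value compares the key's type as well as its value. Retrieval keeps the same shape (sketch; r is an *http.Request inside a handler):

if v := r.Context().Value(userIDKey); v != nil {
	userID := v.(string) // set by one of the auth handlers below
	_ = userID
}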
func headerAuth(next http.HandlerFunc) http.HandlerFunc {
func headerAuth(authc configAuth, next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -35,28 +36,53 @@ func headerAuth(next http.HandlerFunc) http.HandlerFunc {
}
}
func withAuth(next http.HandlerFunc) http.HandlerFunc {
at := conf.Auth.Type
ru := conf.Auth.Rails.URL
func headerHandler(authc configAuth, next http.Handler) http.HandlerFunc {
hdr := authc.Header
if conf.Auth.CredsInHeader {
next = headerAuth(next)
if len(hdr.Name) == 0 {
errlog.Fatal().Str("auth", authc.Name).Msg("no header.name defined")
}
switch at {
if !hdr.Exists && len(hdr.Value) == 0 {
errlog.Fatal().Str("auth", authc.Name).Msg("no header.value defined")
}
return func(w http.ResponseWriter, r *http.Request) {
var fo1 bool
value := r.Header.Get(hdr.Name)
switch {
case hdr.Exists:
fo1 = (len(value) == 0)
default:
fo1 = (value != hdr.Value)
}
if fo1 {
http.Error(w, "401 unauthorized", http.StatusUnauthorized)
return
}
next.ServeHTTP(w, r)
}
}
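
A sketch of configuring the new header auth (field names follow the configAuth struct later in this change; the header name, secret value and next handler are illustrative):

authc := configAuth{Name: "private", Type: "header"}
authc.Header.Name = "X-Api-Key"
authc.Header.Value = "super-secret" // requests must send exactly this value
// Setting authc.Header.Exists = true would instead only require the header to be present.
h := headerHandler(authc, next)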
func withAuth(next http.Handler, authc configAuth) http.Handler {
if authc.CredsInHeader {
next = headerAuth(authc, next)
}
switch authc.Type {
case "rails":
if strings.HasPrefix(ru, "memcache:") {
return railsMemcacheHandler(next)
}
if strings.HasPrefix(ru, "redis:") {
return railsRedisHandler(next)
}
return railsCookieHandler(next)
return railsHandler(authc, next)
case "jwt":
return jwtHandler(next)
return jwtHandler(authc, next)
case "header":
return headerHandler(authc, next)
}
return next


@ -11,22 +11,21 @@ import (
const (
authHeader = "Authorization"
jwtBase int = iota
jwtAuth0
jwtAuth0 int = iota + 1
)
func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
func jwtHandler(authc configAuth, next http.Handler) http.HandlerFunc {
var key interface{}
var jwtProvider int
cookie := conf.Auth.Cookie
cookie := authc.Cookie
if conf.Auth.JWT.Provider == "auth0" {
if authc.JWT.Provider == "auth0" {
jwtProvider = jwtAuth0
}
secret := conf.Auth.JWT.Secret
publicKeyFile := conf.Auth.JWT.PubKeyFile
secret := authc.JWT.Secret
publicKeyFile := authc.JWT.PubKeyFile
switch {
case len(secret) != 0:
@ -35,10 +34,10 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
case len(publicKeyFile) != 0:
kd, err := ioutil.ReadFile(publicKeyFile)
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
switch conf.Auth.JWT.PubKeyType {
switch authc.JWT.PubKeyType {
case "ecdsa":
key, err = jwt.ParseECPublicKeyFromPEM(kd)
@ -51,7 +50,7 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
}
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
}
@ -95,8 +94,11 @@ func jwtHandler(next http.HandlerFunc) http.HandlerFunc {
} else {
ctx = context.WithValue(ctx, userIDKey, claims.Subject)
}
next.ServeHTTP(w, r.WithContext(ctx))
return
}
next.ServeHTTP(w, r)
}
}


@ -6,35 +6,50 @@ import (
"fmt"
"net/http"
"net/url"
"strings"
"github.com/bradfitz/gomemcache/memcache"
"github.com/dosco/super-graph/rails"
"github.com/garyburd/redigo/redis"
)
func railsRedisHandler(next http.HandlerFunc) http.HandlerFunc {
cookie := conf.Auth.Cookie
if len(cookie) == 0 {
logger.Fatal().Msg("no auth.cookie defined")
func railsHandler(authc configAuth, next http.Handler) http.HandlerFunc {
ru := authc.Rails.URL
if strings.HasPrefix(ru, "memcache:") {
return railsMemcacheHandler(authc, next)
}
if len(conf.Auth.Rails.URL) == 0 {
logger.Fatal().Msg("no auth.rails.url defined")
if strings.HasPrefix(ru, "redis:") {
return railsRedisHandler(authc, next)
}
return railsCookieHandler(authc, next)
}
func railsRedisHandler(authc configAuth, next http.Handler) http.HandlerFunc {
cookie := authc.Cookie
if len(cookie) == 0 {
errlog.Fatal().Msg("no auth.cookie defined")
}
if len(authc.Rails.URL) == 0 {
errlog.Fatal().Msg("no auth.rails.url defined")
}
rp := &redis.Pool{
MaxIdle: conf.Auth.Rails.MaxIdle,
MaxActive: conf.Auth.Rails.MaxActive,
MaxIdle: authc.Rails.MaxIdle,
MaxActive: authc.Rails.MaxActive,
Dial: func() (redis.Conn, error) {
c, err := redis.DialURL(conf.Auth.Rails.URL)
c, err := redis.DialURL(authc.Rails.URL)
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
pwd := conf.Auth.Rails.Password
pwd := authc.Rails.Password
if len(pwd) != 0 {
if _, err := c.Do("AUTH", pwd); err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
}
return c, err
@ -66,19 +81,19 @@ func railsRedisHandler(next http.HandlerFunc) http.HandlerFunc {
}
}
func railsMemcacheHandler(next http.HandlerFunc) http.HandlerFunc {
cookie := conf.Auth.Cookie
func railsMemcacheHandler(authc configAuth, next http.Handler) http.HandlerFunc {
cookie := authc.Cookie
if len(cookie) == 0 {
logger.Fatal().Msg("no auth.cookie defined")
errlog.Fatal().Msg("no auth.cookie defined")
}
if len(conf.Auth.Rails.URL) == 0 {
logger.Fatal().Msg("no auth.rails.url defined")
if len(authc.Rails.URL) == 0 {
errlog.Fatal().Msg("no auth.rails.url defined")
}
rURL, err := url.Parse(conf.Auth.Rails.URL)
rURL, err := url.Parse(authc.Rails.URL)
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
mc := memcache.New(rURL.Host)
@ -108,15 +123,15 @@ func railsMemcacheHandler(next http.HandlerFunc) http.HandlerFunc {
}
}
func railsCookieHandler(next http.HandlerFunc) http.HandlerFunc {
cookie := conf.Auth.Cookie
func railsCookieHandler(authc configAuth, next http.Handler) http.HandlerFunc {
cookie := authc.Cookie
if len(cookie) == 0 {
logger.Fatal().Msg("no auth.cookie defined")
errlog.Fatal().Msg("no auth.cookie defined")
}
ra, err := railsAuth(conf)
ra, err := railsAuth(authc)
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
return func(w http.ResponseWriter, r *http.Request) {
@ -139,13 +154,13 @@ func railsCookieHandler(next http.HandlerFunc) http.HandlerFunc {
}
}
func railsAuth(c *config) (*rails.Auth, error) {
secret := c.Auth.Rails.SecretKeyBase
func railsAuth(authc configAuth) (*rails.Auth, error) {
secret := authc.Rails.SecretKeyBase
if len(secret) == 0 {
return nil, errors.New("no auth.rails.secret_key_base defined")
}
version := c.Auth.Rails.Version
version := authc.Rails.Version
if len(version) == 0 {
return nil, errors.New("no auth.rails.version defined")
}
@ -155,16 +170,16 @@ func railsAuth(c *config) (*rails.Auth, error) {
return nil, err
}
if len(c.Auth.Rails.Salt) != 0 {
ra.Salt = c.Auth.Rails.Salt
if len(authc.Rails.Salt) != 0 {
ra.Salt = authc.Rails.Salt
}
if len(conf.Auth.Rails.SignSalt) != 0 {
ra.SignSalt = c.Auth.Rails.SignSalt
if len(authc.Rails.SignSalt) != 0 {
ra.SignSalt = authc.Rails.SignSalt
}
if len(conf.Auth.Rails.AuthSalt) != 0 {
ra.AuthSalt = c.Auth.Rails.AuthSalt
if len(authc.Rails.AuthSalt) != 0 {
ra.AuthSalt = authc.Rails.AuthSalt
}
return ra, nil


@ -1,14 +1,13 @@
package serv
import (
"context"
"fmt"
"os"
"runtime"
"strings"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
@ -22,20 +21,31 @@ const (
)
var (
logger *zerolog.Logger
conf *config
confPath string
db *pgxpool.Pool
qcompile *qcode.Compiler
pcompile *psql.Compiler
// These variables are set using -ldflags
version string
gitBranch string
lastCommitSHA string
lastCommitTime string
)
func Init() {
logger = initLog()
var (
logger zerolog.Logger // logger for everything but errors
errlog zerolog.Logger // logger for errors includes line numbers
conf *config // parsed config
confPath string // path to the config file
db *pgxpool.Pool // database connection pool
schema *psql.DBSchema // database tables, columns and relationships
allowList *allow.List // allow.list is contains queries allowed in production
qcompile *qcode.Compiler // qcode compiler
pcompile *psql.Compiler // postgres sql compiler
)
func Cmd() {
initLog()
rootCmd := &cobra.Command{
Use: "super-graph",
Short: "An instant high-performance GraphQL API. No code needed. https://supergraph.dev",
Short: BuildDetails(),
}
rootCmd.AddCommand(&cobra.Command{
@ -110,6 +120,13 @@ e.g. db:migrate -+1
Run: cmdDBSetup,
})
rootCmd.AddCommand(&cobra.Command{
Use: "db:reset",
Short: "Reset database",
Long: "This command will drop, create, migrate and seed the database (won't run in production)",
Run: cmdDBReset,
})
rootCmd.AddCommand(&cobra.Command{
Use: "new APP-NAME",
Short: "Create a new application",
@ -124,155 +141,40 @@ e.g. db:migrate -+1
Run: cmdConfDump,
})
rootCmd.AddCommand(&cobra.Command{
Use: "version",
Short: "Super Graph binary version information",
Run: cmdVersion,
})
rootCmd.Flags().StringVar(&confPath,
"path", "./config", "path to config files")
if err := rootCmd.Execute(); err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
}
func initLog() *zerolog.Logger {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger := zerolog.New(out).
With().
Timestamp().
Caller().
Logger()
return &logger
func cmdVersion(cmd *cobra.Command, args []string) {
fmt.Printf("%s\n", BuildDetails())
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
func BuildDetails() string {
return fmt.Sprintf(`
Super Graph %v
For documentation, visit https://supergraph.dev
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
Commit SHA-1 : %v
Commit timestamp : %v
Branch : %v
Go version : %v
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
logger.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
vi.MergeInConfig()
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
logger.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(*logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(*logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = conf.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
logger.Fatal().Err(err).Msg("failed to initialize compilers")
}
if err := initResolvers(); err != nil {
logger.Fatal().Err(err).Msg("failed to initialized resolvers")
}
Licensed under the Apache Public License 2.0
Copyright 2020, Vikram Rangnekar.
`,
version,
lastCommitSHA,
lastCommitTime,
gitBranch,
runtime.Version())
}


@ -9,7 +9,7 @@ import (
func cmdConfDump(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help()
cmd.Help() //nolint: errcheck
os.Exit(1)
}
@ -17,11 +17,11 @@ func cmdConfDump(cmd *cobra.Command, args []string) {
conf, err := initConf()
if err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
errlog.Fatal().Err(err).Msg("failed to read config")
}
if err := conf.Viper.WriteConfigAs(fname); err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
logger.Info().Msgf("config dumped to ./%s", fname)


@ -14,19 +14,6 @@ import (
"github.com/spf13/cobra"
)
var sampleMigration = `-- This is a sample migration.
create table users(
id serial primary key,
fullname varchar not null,
email varchar not null
);
---- create above / drop below ----
drop table users;
`
var newMigrationText = `-- Write your migrate up statements here
---- create above / drop below ----
@ -36,6 +23,7 @@ var newMigrationText = `-- Write your migrate up statements here
`
func cmdDBSetup(cmd *cobra.Command, args []string) {
initConfOnce()
cmdDBCreate(cmd, []string{})
cmdDBMigrate(cmd, []string{"up"})
@ -47,58 +35,59 @@ func cmdDBSetup(cmd *cobra.Command, args []string) {
return
}
if os.IsNotExist(err) == false {
logger.Fatal().Err(err).Msgf("unable to check if '%s' exists", sfile)
if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Msgf("unable to check if '%s' exists", sfile)
}
logger.Warn().Msgf("failed to read seed file '%s'", sfile)
}
func cmdDBCreate(cmd *cobra.Command, args []string) {
var err error
func cmdDBReset(cmd *cobra.Command, args []string) {
initConfOnce()
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
if conf.Production {
errlog.Fatal().Msg("db:reset does not work in production")
return
}
cmdDBDrop(cmd, []string{})
cmdDBSetup(cmd, []string{})
}
func cmdDBCreate(cmd *cobra.Command, args []string) {
initConfOnce()
ctx := context.Background()
conn, err := initDB(conf, false)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
errlog.Fatal().Err(err).Msg("failed to connect to database")
}
defer conn.Close(ctx)
sql := fmt.Sprintf("CREATE DATABASE %s", conf.DB.DBName)
sql := fmt.Sprintf(`CREATE DATABASE "%s"`, conf.DB.DBName)
_, err = conn.Exec(ctx, sql)
if err != nil {
logger.Fatal().Err(err).Msg("failed to create database")
errlog.Fatal().Err(err).Msg("failed to create database")
}
logger.Info().Msgf("created database '%s'", conf.DB.DBName)
}
func cmdDBDrop(cmd *cobra.Command, args []string) {
var err error
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
}
initConfOnce()
ctx := context.Background()
conn, err := initDB(conf, false)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
errlog.Fatal().Err(err).Msg("failed to connect to database")
}
defer conn.Close(ctx)
sql := fmt.Sprintf(`DROP DATABASE IF EXISTS %s`, conf.DB.DBName)
sql := fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, conf.DB.DBName)
_, err = conn.Exec(ctx, sql)
if err != nil {
logger.Fatal().Err(err).Msg("failed to create database")
errlog.Fatal().Err(err).Msg("failed to create database")
}
logger.Info().Msgf("dropped database '%s'", conf.DB.DBName)
@ -106,16 +95,11 @@ func cmdDBDrop(cmd *cobra.Command, args []string) {
func cmdDBNew(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help()
cmd.Help() //nolint: errcheck
os.Exit(1)
}
var err error
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
}
initConfOnce()
name := args[0]
m, err := migrate.FindMigrations(conf.MigrationsPath)
@ -124,7 +108,7 @@ func cmdDBNew(cmd *cobra.Command, args []string) {
os.Exit(1)
}
mname := fmt.Sprintf("%03d_%s.sql", len(m)+100, name)
mname := fmt.Sprintf("%d_%s.sql", len(m), name)
// Write new migration
mpath := filepath.Join(conf.MigrationsPath, mname)
@ -144,39 +128,34 @@ func cmdDBNew(cmd *cobra.Command, args []string) {
}
func cmdDBMigrate(cmd *cobra.Command, args []string) {
var err error
if len(args) == 0 {
cmd.Help()
cmd.Help() //nolint: errcheck
os.Exit(1)
}
initConfOnce()
dest := args[0]
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
}
conn, err := initDB(conf, true)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
errlog.Fatal().Err(err).Msg("failed to connect to database")
}
defer conn.Close(context.Background())
m, err := migrate.NewMigrator(conn, "schema_version")
if err != nil {
logger.Fatal().Err(err).Msg("failed to initializing migrator")
errlog.Fatal().Err(err).Msg("failed to initializing migrator")
}
m.Data = getMigrationVars()
err = m.LoadMigrations(conf.MigrationsPath)
if err != nil {
logger.Fatal().Err(err).Msg("failed to load migrations")
errlog.Fatal().Err(err).Msg("failed to load migrations")
}
if len(m.Migrations) == 0 {
logger.Fatal().Msg("No migrations found")
errlog.Fatal().Msg("No migrations found")
}
m.OnStart = func(sequence int32, name, direction, sql string) {
@ -195,7 +174,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
var n int64
n, err = strconv.ParseInt(d, 10, 32)
if err != nil {
logger.Fatal().Err(err).Msg("invalid destination")
errlog.Fatal().Err(err).Msg("invalid destination")
}
return int32(n)
}
@ -219,24 +198,22 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
err = m.MigrateTo(currentVersion + mustParseDestination(dest[1:]))
} else {
cmd.Help()
cmd.Help() //nolint: errcheck
os.Exit(1)
}
if err != nil {
logger.Info().Err(err).Send()
// logger.Info().Err(err).Send()
logger.Fatal().Err(err).Send()
// if err, ok := err.(m.MigrationPgError); ok {
// if err.Detail != "" {
// logger.Info().Err(err).Msg(err.Detail)
// info.Err(err).Msg(err.Detail)
// }
// if err.Position != 0 {
// ele, err := ExtractErrorLine(err.Sql, int(err.Position))
// if err != nil {
// logger.Fatal().Err(err).Send()
// errlog.Fatal().Err(err).Send()
// }
// prefix := fmt.Sprintf()
@ -251,37 +228,33 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
}
func cmdDBStatus(cmd *cobra.Command, args []string) {
var err error
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
}
initConfOnce()
conn, err := initDB(conf, true)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
errlog.Fatal().Err(err).Msg("failed to connect to database")
}
defer conn.Close(context.Background())
m, err := migrate.NewMigrator(conn, "schema_version")
if err != nil {
logger.Fatal().Err(err).Msg("failed to initialize migrator")
errlog.Fatal().Err(err).Msg("failed to initialize migrator")
}
m.Data = getMigrationVars()
err = m.LoadMigrations(conf.MigrationsPath)
if err != nil {
logger.Fatal().Err(err).Msg("failed to load migrations")
errlog.Fatal().Err(err).Msg("failed to load migrations")
}
if len(m.Migrations) == 0 {
logger.Fatal().Msg("no migrations found")
errlog.Fatal().Msg("no migrations found")
}
mver, err := m.GetCurrentVersion()
if err != nil {
logger.Fatal().Err(err).Msg("failed to retrieve migration")
errlog.Fatal().Err(err).Msg("failed to retrieve migration")
}
var status string
@ -338,3 +311,13 @@ func getMigrationVars() map[string]interface{} {
"env": strings.ToLower(os.Getenv("GO_ENV")),
}
}
func initConfOnce() {
var err error
if conf == nil {
if conf, err = initConf(); err != nil {
errlog.Fatal().Err(err).Msg("failed to read config")
}
}
}


@ -16,7 +16,7 @@ import (
func cmdNew(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.Help()
cmd.Help() //nolint: errcheck
os.Exit(1)
}
@ -90,8 +90,8 @@ func cmdNew(cmd *cobra.Command, args []string) {
return os.Mkdir(p, os.ModePerm)
})
ifNotExists(path.Join(appMigrationsPath, "100_init.sql"), func(p string) error {
if v, err := tmpl.get("100_init.sql"); err == nil {
ifNotExists(path.Join(appMigrationsPath, "0_init.sql"), func(p string) error {
if v, err := tmpl.get("0_init.sql"); err == nil {
return ioutil.WriteFile(p, v, 0644)
} else {
return err
@ -115,13 +115,17 @@ func (t *Templ) get(name string) ([]byte, error) {
b := bytes.Buffer{}
tmpl := fasttemplate.New(v, "{%", "%}")
tmpl.ExecuteFunc(&b, func(w io.Writer, tag string) (int, error) {
_, err := tmpl.ExecuteFunc(&b, func(w io.Writer, tag string) (int, error) {
if val, ok := t.data[strings.TrimSpace(tag)]; ok {
return w.Write([]byte(val))
}
return 0, fmt.Errorf("unknown template variable '%s'", tag)
})
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
@ -133,13 +137,13 @@ func ifNotExists(filePath string, doFn func(string) error) {
return
}
if os.IsNotExist(err) == false {
logger.Fatal().Err(err).Msgf("unable to check if '%s' exists", filePath)
if !os.IsNotExist(err) {
errlog.Fatal().Err(err).Msgf("unable to check if '%s' exists", filePath)
}
err = doFn(filePath)
if err != nil {
logger.Fatal().Err(err).Msgf("unable to create '%s'", filePath)
errlog.Fatal().Err(err).Msgf("unable to create '%s'", filePath)
}
logger.Info().Msgf("created '%s'", filePath)


@ -1,6 +1,7 @@
package serv
import (
"bytes"
"context"
"encoding/json"
"fmt"
@ -12,20 +13,21 @@ import (
"github.com/brianvoe/gofakeit"
"github.com/dop251/goja"
"github.com/spf13/cobra"
"github.com/valyala/fasttemplate"
)
func cmdDBSeed(cmd *cobra.Command, args []string) {
var err error
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
errlog.Fatal().Err(err).Msg("failed to read config")
}
conf.Production = false
db, err = initDBPool(conf)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
errlog.Fatal().Err(err).Msg("failed to connect to database")
}
initCompiler()
@ -34,14 +36,14 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
b, err := ioutil.ReadFile(path.Join(confPath, conf.SeedFile))
if err != nil {
logger.Fatal().Err(err).Msgf("failed to read seed file '%s'", sfile)
errlog.Fatal().Err(err).Msgf("failed to read seed file '%s'", sfile)
}
vm := goja.New()
vm.Set("graphql", graphQLFunc)
console := vm.NewObject()
console.Set("log", logFunc)
console.Set("log", logFunc) //nolint: errcheck
vm.Set("console", console)
fake := vm.NewObject()
@ -50,39 +52,83 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
_, err = vm.RunScript("seed.js", string(b))
if err != nil {
logger.Fatal().Err(err).Msg("failed to execute script")
errlog.Fatal().Err(err).Msg("failed to execute script")
}
logger.Info().Msg("seed script done")
}
//func runFunc(call goja.FunctionCall) {
func graphQLFunc(query string, data interface{}) map[string]interface{} {
b, err := json.Marshal(data)
func graphQLFunc(query string, data interface{}, opt map[string]string) map[string]interface{} {
vars, err := json.Marshal(data)
if err != nil {
logger.Fatal().Err(err).Msg("failed to json serialize")
errlog.Fatal().Err(err).Send()
}
c := &coreContext{Context: context.Background()}
c.req.Query = query
c.req.Vars = b
c.req.role = "user"
c := context.Background()
res, err := c.execQuery()
if v, ok := opt["user_id"]; ok && len(v) != 0 {
c = context.WithValue(c, userIDKey, v)
}
var role string
if v, ok := opt["role"]; ok && len(v) != 0 {
role = v
} else {
role = "user"
}
stmts, err := buildRoleStmt([]byte(query), vars, role)
if err != nil {
logger.Fatal().Err(err).Msg("graphql query failed")
errlog.Fatal().Err(err).Msg("graphql query failed")
}
st := stmts[0]
buf := &bytes.Buffer{}
t := fasttemplate.New(st.sql, openVar, closeVar)
_, err = t.ExecuteFunc(buf, argMap(c, vars))
if err != nil {
errlog.Fatal().Err(err).Send()
}
finalSQL := buf.String()
tx, err := db.Begin(c)
if err != nil {
errlog.Fatal().Err(err).Send()
}
defer tx.Rollback(c) //nolint: errcheck
if conf.DB.SetUserID {
if err := setLocalUserID(c, tx); err != nil {
errlog.Fatal().Err(err).Send()
}
}
var root []byte
if err = tx.QueryRow(context.Background(), finalSQL).Scan(&root); err != nil {
errlog.Fatal().Err(err).Msg("sql query failed")
}
if err := tx.Commit(c); err != nil {
errlog.Fatal().Err(err).Send()
}
val := make(map[string]interface{})
err = json.Unmarshal(res, &val)
err = json.Unmarshal(root, &val)
if err != nil {
logger.Fatal().Err(err).Msg("failed to deserialize json")
errlog.Fatal().Err(err).Send()
}
return val
}
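
The seed runner now reuses the same fasttemplate pass as the HTTP path: each template variable in the compiled SQL is resolved through argMap. A minimal sketch of that mechanism (the {% %} delimiters are illustrative; the real openVar/closeVar are defined elsewhere in serv):

buf := &bytes.Buffer{}
t := fasttemplate.New(`select * from users where id = {%user_id%}`, "{%", "%}")
_, err := t.ExecuteFunc(buf, func(w io.Writer, tag string) (int, error) {
	if tag == "user_id" {
		return io.WriteString(w, "5")
	}
	return 0, fmt.Errorf("unknown tag '%s'", tag)
})
if err != nil {
	errlog.Fatal().Err(err).Send()
}
// buf now holds: select * from users where id = 5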
//nolint: errcheck
func logFunc(args ...interface{}) {
for _, arg := range args {
if _, ok := arg.(map[string]interface{}); ok {
@ -99,6 +145,7 @@ func logFunc(args ...interface{}) {
}
}
//nolint: errcheck
func setFakeFuncs(f *goja.Object) {
gofakeit.Seed(0)
@ -156,10 +203,9 @@ func setFakeFuncs(f *goja.Object) {
f.Set("transmission_gear_type", gofakeit.TransmissionGearType)
// Text
f.Set("word", gofakeit.Word)
f.Set("sentence", gofakeit.Sentence)
f.Set("paragrph", gofakeit.Paragraph)
f.Set("paragraph", gofakeit.Paragraph)
f.Set("question", gofakeit.Question)
f.Set("quote", gofakeit.Quote)


@ -7,19 +7,22 @@ import (
func cmdServ(cmd *cobra.Command, args []string) {
var err error
initWatcher(confPath)
if conf, err = initConf(); err != nil {
logger.Fatal().Err(err).Msg("failed to read config")
fatalInProd(err, "failed to read config")
}
db, err = initDBPool(conf)
if err != nil {
logger.Fatal().Err(err).Msg("failed to connect to database")
fatalInProd(err, "failed to connect to database")
}
initCompiler()
initResolvers()
initAllowList(confPath)
initPreparedList()
initWatcher(confPath)
initPreparedList(confPath)
startHTTP()
}


@ -5,6 +5,7 @@ import (
"os"
"regexp"
"strings"
"time"
"unicode"
"github.com/gobuffalo/flect"
@ -19,6 +20,7 @@ type config struct {
HostPort string `mapstructure:"host_port"`
Host string
Port string
HTTPGZip bool `mapstructure:"http_compress"`
WebUI bool `mapstructure:"web_ui"`
LogLevel string `mapstructure:"log_level"`
EnableTracing bool `mapstructure:"enable_tracing"`
@ -31,58 +33,74 @@ type config struct {
Inflections map[string]string
Auth struct {
Type string
Cookie string
CredsInHeader bool `mapstructure:"creds_in_header"`
Rails struct {
Version string
SecretKeyBase string `mapstructure:"secret_key_base"`
URL string
Password string
MaxIdle int `mapstructure:"max_idle"`
MaxActive int `mapstructure:"max_active"`
Salt string
SignSalt string `mapstructure:"sign_salt"`
AuthSalt string `mapstructure:"auth_salt"`
}
JWT struct {
Provider string
Secret string
PubKeyFile string `mapstructure:"public_key_file"`
PubKeyType string `mapstructure:"public_key_type"`
}
}
Auth configAuth
Auths []configAuth
DB struct {
Type string
Host string
Port uint16
DBName string
User string
Password string
Schema string
PoolSize int32 `mapstructure:"pool_size"`
MaxRetries int `mapstructure:"max_retries"`
LogLevel string `mapstructure:"log_level"`
SetUserID bool `mapstructure:"set_user_id"`
Type string
Host string
Port uint16
DBName string
User string
Password string
Schema string
PoolSize int32 `mapstructure:"pool_size"`
MaxRetries int `mapstructure:"max_retries"`
SetUserID bool `mapstructure:"set_user_id"`
PingTimeout time.Duration `mapstructure:"ping_timeout"`
Vars map[string]string `mapstructure:"variables"`
Defaults struct {
Filters []string
Blocklist []string
}
Vars map[string]string `mapstructure:"variables"`
Blocklist []string
Tables []configTable
} `mapstructure:"database"`
Actions []configAction
Tables []configTable
RolesQuery string `mapstructure:"roles_query"`
Roles []configRole
RolesQuery string `mapstructure:"roles_query"`
Roles []configRole
roles map[string]*configRole
abacEnabled bool
}
type configAuth struct {
Name string
Type string
Cookie string
CredsInHeader bool `mapstructure:"creds_in_header"`
Rails struct {
Version string
SecretKeyBase string `mapstructure:"secret_key_base"`
URL string
Password string
MaxIdle int `mapstructure:"max_idle"`
MaxActive int `mapstructure:"max_active"`
Salt string
SignSalt string `mapstructure:"sign_salt"`
AuthSalt string `mapstructure:"auth_salt"`
}
JWT struct {
Provider string
Secret string
PubKeyFile string `mapstructure:"public_key_file"`
PubKeyType string `mapstructure:"public_key_type"`
}
Header struct {
Name string
Value string
Exists bool
}
}
type configColumn struct {
Name string
Type string
ForeignKey string `mapstructure:"related_to"`
}
type configTable struct {
@ -90,6 +108,7 @@ type configTable struct {
Table string
Blocklist []string
Remotes []configRemote
Columns []configColumn
}
type configRemote struct {
@ -149,6 +168,12 @@ type configRole struct {
tablesMap map[string]*configRoleTable
}
type configAction struct {
Name string
SQL string
AuthName string `mapstructure:"auth_name"`
}
func newConfig(name string) *viper.Viper {
vi := viper.New()
@ -177,9 +202,10 @@ func newConfig(name string) *viper.Viper {
vi.SetDefault("database.schema", "public")
vi.SetDefault("env", "development")
vi.BindEnv("env", "GO_ENV")
vi.BindEnv("HOST", "HOST")
vi.BindEnv("PORT", "PORT")
vi.BindEnv("env", "GO_ENV") //nolint: errcheck
vi.BindEnv("HOST", "HOST") //nolint: errcheck
vi.BindEnv("PORT", "PORT") //nolint: errcheck
vi.SetDefault("auth.rails.max_idle", 80)
vi.SetDefault("auth.rails.max_active", 12000)
@ -221,16 +247,16 @@ func (c *config) Init(vi *viper.Viper) error {
}
c.RolesQuery = sanitize(c.RolesQuery)
rolesMap := make(map[string]struct{})
c.roles = make(map[string]*configRole)
for i := range c.Roles {
role := &c.Roles[i]
if _, ok := rolesMap[role.Name]; ok {
logger.Fatal().Msgf("duplicate role '%s' found", role.Name)
if _, ok := c.roles[role.Name]; ok {
errlog.Fatal().Msgf("duplicate role '%s' found", role.Name)
}
role.Name = sanitize(role.Name)
role.Name = strings.ToLower(role.Name)
role.Match = sanitize(role.Match)
role.tablesMap = make(map[string]*configRoleTable)
@ -238,18 +264,35 @@ func (c *config) Init(vi *viper.Viper) error {
role.tablesMap[table.Name] = &role.Tables[n]
}
rolesMap[role.Name] = struct{}{}
c.roles[role.Name] = role
}
if _, ok := rolesMap["user"]; !ok {
c.Roles = append(c.Roles, configRole{Name: "user"})
if _, ok := c.roles["user"]; !ok {
u := configRole{Name: "user"}
c.Roles = append(c.Roles, u)
c.roles["user"] = &u
}
if _, ok := rolesMap["anon"]; !ok {
if _, ok := c.roles["anon"]; !ok {
logger.Warn().Msg("unauthenticated requests will be blocked. no role 'anon' defined")
c.AuthFailBlock = true
}
if len(c.RolesQuery) == 0 {
c.abacEnabled = false
} else {
switch len(c.Roles) {
case 0, 1:
c.abacEnabled = false
case 2:
_, ok1 := c.roles["anon"]
_, ok2 := c.roles["user"]
c.abacEnabled = !(ok1 && ok2)
default:
c.abacEnabled = true
}
}
c.validate()
return nil
@ -258,26 +301,48 @@ func (c *config) Init(vi *viper.Viper) error {
func (c *config) validate() {
rm := make(map[string]struct{})
for i := range c.Roles {
name := c.Roles[i].Name
for _, v := range c.Roles {
name := strings.ToLower(v.Name)
if _, ok := rm[name]; ok {
logger.Fatal().Msgf("duplicate config for role '%s'", c.Roles[i].Name)
errlog.Fatal().Msgf("duplicate config for role '%s'", v.Name)
}
rm[name] = struct{}{}
}
tm := make(map[string]struct{})
for i := range c.Tables {
name := c.Tables[i].Name
for _, v := range c.Tables {
name := strings.ToLower(v.Name)
if _, ok := tm[name]; ok {
logger.Fatal().Msgf("duplicate config for table '%s'", c.Tables[i].Name)
errlog.Fatal().Msgf("duplicate config for table '%s'", v.Name)
}
tm[name] = struct{}{}
}
am := make(map[string]struct{})
for _, v := range c.Auths {
name := strings.ToLower(v.Name)
if _, ok := am[name]; ok {
errlog.Fatal().Msgf("duplicate config for auth '%s'", v.Name)
}
am[name] = struct{}{}
}
for _, v := range c.Actions {
if len(v.AuthName) == 0 {
continue
}
authName := strings.ToLower(v.AuthName)
if _, ok := am[authName]; !ok {
errlog.Fatal().Msgf("invalid auth_name for action '%s'", v.Name)
}
}
if len(c.RolesQuery) == 0 {
logger.Warn().Msgf("no 'roles_query' defined.")
}
@ -289,7 +354,7 @@ func (c *config) getAliasMap() map[string][]string {
for i := range c.Tables {
t := c.Tables[i]
if len(t.Table) == 0 {
if len(t.Table) == 0 || len(t.Columns) != 0 {
continue
}
@ -298,6 +363,15 @@ func (c *config) getAliasMap() map[string][]string {
return m
}
func (c *config) isABACEnabled() bool {
return c.abacEnabled
}
func (c *config) isAnonRoleDefined() bool {
_, ok := c.roles["anon"]
return ok
}
var varRe1 = regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
var varRe2 = regexp.MustCompile(`\{\{([a-zA-Z0-9_.]+)\}\}`)
@ -315,3 +389,31 @@ func sanitize(s string) string {
return strings.ToLower(m)
})
}
func getConfigName() string {
if len(os.Getenv("GO_ENV")) == 0 {
return "dev"
}
ge := strings.ToLower(os.Getenv("GO_ENV"))
switch {
case strings.HasPrefix(ge, "pro"):
return "prod"
case strings.HasPrefix(ge, "sta"):
return "stage"
case strings.HasPrefix(ge, "tes"):
return "test"
case strings.HasPrefix(ge, "dev"):
return "dev"
}
return ge
}
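
So GO_ENV is normalized to one of the four known config names, and any unrecognized value is used verbatim as the config file name:

// GO_ENV=""           -> "dev"
// GO_ENV="production" -> "prod"
// GO_ENV="staging"    -> "stage"
// GO_ENV="testing"    -> "test"
// GO_ENV="ci"         -> "ci"  (unknown values are used as-is)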
func isDev() bool {
return strings.HasPrefix(os.Getenv("GO_ENV"), "dev")
}

serv/config_compile.go (new file, 164 lines)

@ -0,0 +1,164 @@
package serv
import (
"fmt"
"strings"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
)
func addTables(c *config, di *psql.DBInfo) error {
for _, t := range c.Tables {
if len(t.Table) == 0 || len(t.Columns) == 0 {
continue
}
if err := addTable(di, t.Columns, t); err != nil {
return err
}
}
return nil
}
func addTable(di *psql.DBInfo, cols []configColumn, t configTable) error {
bc, ok := di.GetColumn(t.Table, t.Name)
if !ok {
return fmt.Errorf(
"Column '%s' not found on table '%s'",
t.Name, t.Table)
}
if bc.Type != "json" && bc.Type != "jsonb" {
return fmt.Errorf(
"Column '%s' in table '%s' is of type '%s'. Only JSON or JSONB is valid",
t.Name, t.Table, bc.Type)
}
table := psql.DBTable{
Name: t.Name,
Key: strings.ToLower(t.Name),
Type: bc.Type,
}
columns := make([]psql.DBColumn, 0, len(cols))
for i := range cols {
c := cols[i]
columns = append(columns, psql.DBColumn{
Name: c.Name,
Key: strings.ToLower(c.Name),
Type: c.Type,
})
}
di.AddTable(table, columns)
bc.FKeyTable = t.Name
return nil
}
func addForeignKeys(c *config, di *psql.DBInfo) error {
for _, t := range c.Tables {
for _, c := range t.Columns {
if len(c.ForeignKey) == 0 {
continue
}
if err := addForeignKey(di, c, t); err != nil {
return err
}
}
}
return nil
}
func addForeignKey(di *psql.DBInfo, c configColumn, t configTable) error {
c1, ok := di.GetColumn(t.Name, c.Name)
if !ok {
return fmt.Errorf(
"Invalid table '%s' or column '%s' in config",
t.Name, c.Name)
}
v := strings.SplitN(c.ForeignKey, ".", 2)
if len(v) != 2 {
return fmt.Errorf(
"Invalid foreign_key in config for table '%s' and column '%s",
t.Name, c.Name)
}
fkt, fkc := v[0], v[1]
c2, ok := di.GetColumn(fkt, fkc)
if !ok {
return fmt.Errorf(
"Invalid foreign_key in config for table '%s' and column '%s",
t.Name, c.Name)
}
c1.FKeyTable = fkt
c1.FKeyColID = []int16{c2.ID}
return nil
}
func addRoles(c *config, qc *qcode.Compiler) error {
for _, r := range c.Roles {
for _, t := range r.Tables {
if err := addRole(qc, r, t); err != nil {
return err
}
}
}
return nil
}
func addRole(qc *qcode.Compiler, r configRole, t configRoleTable) error {
blockFilter := []string{"false"}
query := qcode.QueryConfig{
Limit: t.Query.Limit,
Filters: t.Query.Filters,
Columns: t.Query.Columns,
DisableFunctions: t.Query.DisableFunctions,
}
if t.Query.Block {
query.Filters = blockFilter
}
insert := qcode.InsertConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
Presets: t.Insert.Presets,
}
if t.Insert.Block {
insert.Filters = blockFilter
}
update := qcode.UpdateConfig{
Filters: t.Update.Filters,
Columns: t.Update.Columns,
Presets: t.Update.Presets,
}
if t.Update.Block {
update.Filters = blockFilter
}
delete := qcode.DeleteConfig{
Filters: t.Delete.Filters,
Columns: t.Delete.Columns,
}
if t.Delete.Block {
delete.Filters = blockFilter
}
return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
Query: query,
Insert: insert,
Update: update,
Delete: delete,
})
}


@ -8,20 +8,15 @@ import (
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgx/v4"
"github.com/valyala/fasttemplate"
)
const (
empty = ""
)
type coreContext struct {
req gqlReq
res gqlResp
@ -32,6 +27,10 @@ func (c *coreContext) handleReq(w io.Writer, req *http.Request) error {
c.req.ref = req.Referer()
c.req.hdr = req.Header
if len(c.req.Vars) == 2 {
c.req.Vars = nil
}
if authCheck(c) {
c.req.role = "user"
} else {
@ -47,90 +46,54 @@ func (c *coreContext) handleReq(w io.Writer, req *http.Request) error {
}
func (c *coreContext) execQuery() ([]byte, error) {
var err error
var skipped uint32
var qc *qcode.QCode
var data []byte
logger.Debug().Str("role", c.req.role).Msg(c.req.Query)
var st *stmt
var err error
if conf.Production {
var ps *preparedItem
data, ps, err = c.resolvePreparedSQL()
data, st, err = c.resolvePreparedSQL()
if err != nil {
return nil, err
}
logger.Error().
Err(err).
Str("default_role", c.req.role).
Msg(c.req.Query)
skipped = ps.skipped
qc = ps.qc
return nil, errors.New("query failed. check logs for error")
}
} else {
data, skipped, err = c.resolveSQL()
if err != nil {
if data, st, err = c.resolveSQL(); err != nil {
return nil, err
}
}
if len(data) == 0 || skipped == 0 {
return data, nil
}
sel := qc.Selects
h := xxhash.New()
// fetch the field name used within the db response json
// that are used to mark insertion points and the mapping between
// those field names and their select objects
fids, sfmap := parentFieldIds(h, sel, skipped)
// fetch the field values of the marked insertion points
// these values contain the id to be used with fetching remote data
from := jsn.Get(data, fids)
var to []jsn.Field
switch {
case len(from) == 1:
to, err = c.resolveRemote(c.req.hdr, h, from[0], sel, sfmap)
case len(from) > 1:
to, err = c.resolveRemotes(c.req.hdr, h, from, sel, sfmap)
default:
return nil, errors.New("something wrong no remote ids found in db response")
}
if err != nil {
return nil, err
}
var ob bytes.Buffer
err = jsn.Replace(&ob, data, from, to)
if err != nil {
return nil, err
}
return ob.Bytes(), nil
return execRemoteJoin(st, data, c.req.hdr)
}
func (c *coreContext) resolvePreparedSQL() ([]byte, *preparedItem, error) {
tx, err := db.Begin(c)
if err != nil {
return nil, nil, err
func (c *coreContext) resolvePreparedSQL() ([]byte, *stmt, error) {
var tx pgx.Tx
var err error
qt := qcode.GetQType(c.req.Query)
mutation := (qt == qcode.QTMutation)
useRoleQuery := conf.isABACEnabled() && mutation
useTx := useRoleQuery || conf.DB.SetUserID
if useTx {
if tx, err = db.Begin(c.Context); err != nil {
return nil, nil, err
}
defer tx.Rollback(c) //nolint: errcheck
}
defer tx.Rollback(c)
if conf.DB.SetUserID {
if err := c.setLocalUserID(tx); err != nil {
if err := setLocalUserID(c.Context, tx); err != nil {
return nil, nil, err
}
}
var role string
mutation := isMutation(c.req.Query)
useRoleQuery := len(conf.RolesQuery) != 0 && mutation
if useRoleQuery {
if role, err = c.executeRoleQuery(tx); err != nil {
@ -140,80 +103,102 @@ func (c *coreContext) resolvePreparedSQL() ([]byte, *preparedItem, error) {
} else if v := c.Value(userRoleKey); v != nil {
role = v.(string)
} else if mutation {
} else {
role = c.req.role
}
ps, ok := _preparedList[gqlHash(c.req.Query, c.req.Vars, role)]
ps, ok := _preparedList[stmtHash(allow.QueryName(c.req.Query), role)]
if !ok {
return nil, nil, errUnauthorized
}
var root []byte
vars := argList(c, ps.args)
var row pgx.Row
if mutation {
err = tx.QueryRow(c, ps.stmt.SQL, vars...).Scan(&root)
} else {
err = tx.QueryRow(c, ps.stmt.SQL, vars...).Scan(&c.req.role, &root)
}
vars, err := argList(c, ps.args)
if err != nil {
return nil, nil, err
}
if err := tx.Commit(c); err != nil {
if useTx {
row = tx.QueryRow(c.Context, ps.sd.SQL, vars...)
} else {
row = db.QueryRow(c.Context, ps.sd.SQL, vars...)
}
if ps.roleArg {
err = row.Scan(&role, &root)
} else {
err = row.Scan(&root)
}
if len(role) == 0 {
logger.Debug().Str("default_role", c.req.role).Msg(c.req.Query)
} else {
logger.Debug().Str("default_role", c.req.role).Str("role", role).Msg(c.req.Query)
}
if err != nil {
return nil, nil, err
}
return root, ps, nil
c.req.role = role
if useTx {
if err := tx.Commit(c.Context); err != nil {
return nil, nil, err
}
}
return root, &ps.st, nil
}
func (c *coreContext) resolveSQL() ([]byte, uint32, error) {
tx, err := db.Begin(c)
if err != nil {
return nil, 0, err
}
defer tx.Rollback(c)
func (c *coreContext) resolveSQL() ([]byte, *stmt, error) {
var tx pgx.Tx
var err error
mutation := isMutation(c.req.Query)
useRoleQuery := len(conf.RolesQuery) != 0 && mutation
qt := qcode.GetQType(c.req.Query)
mutation := (qt == qcode.QTMutation)
useRoleQuery := conf.isABACEnabled() && mutation
useTx := useRoleQuery || conf.DB.SetUserID
if useTx {
if tx, err = db.Begin(c.Context); err != nil {
return nil, nil, err
}
defer tx.Rollback(c.Context) //nolint: errcheck
}
if conf.DB.SetUserID {
if err := setLocalUserID(c.Context, tx); err != nil {
return nil, nil, err
}
}
if useRoleQuery {
if c.req.role, err = c.executeRoleQuery(tx); err != nil {
return nil, 0, err
return nil, nil, err
}
} else if v := c.Value(userRoleKey); v != nil {
c.req.role = v.(string)
}
stmts, err := c.buildStmt()
stmts, err := buildStmt(qt, []byte(c.req.Query), c.req.Vars, c.req.role)
if err != nil {
return nil, 0, err
}
var st *stmt
if mutation {
st = findStmt(c.req.role, stmts)
} else {
st = &stmts[0]
return nil, nil, err
}
st := &stmts[0]
t := fasttemplate.New(st.sql, openVar, closeVar)
buf := &bytes.Buffer{}
_, err = t.ExecuteFunc(buf, argMap(c))
if err == errNoUserID {
logger.Warn().Msg("no user id found. query requires an authenicated request")
}
_, err = t.ExecuteFunc(buf, argMap(c, c.req.Vars))
if err != nil {
return nil, 0, err
return nil, nil, err
}
finalSQL := buf.String()
var stime time.Time
@@ -222,197 +207,70 @@ func (c *coreContext) resolveSQL() ([]byte, uint32, error) {
stime = time.Now()
}
if conf.DB.SetUserID {
if err := c.setLocalUserID(tx); err != nil {
return nil, 0, err
var root []byte
var role string
var row pgx.Row
defaultRole := c.req.role
if useTx {
row = tx.QueryRow(c.Context, finalSQL)
} else {
row = db.QueryRow(c.Context, finalSQL)
}
if len(stmts) > 1 {
err = row.Scan(&role, &root)
} else {
err = row.Scan(&root)
}
if len(role) == 0 {
logger.Debug().Str("default_role", defaultRole).Msg(c.req.Query)
} else {
logger.Debug().Str("default_role", defaultRole).Str("role", role).Msg(c.req.Query)
}
if err != nil {
return nil, nil, err
}
if useTx {
if err := tx.Commit(c.Context); err != nil {
return nil, nil, err
}
}
var root []byte
if mutation {
err = tx.QueryRow(c, finalSQL).Scan(&root)
} else {
err = tx.QueryRow(c, finalSQL).Scan(&c.req.role, &root)
}
if err != nil {
return nil, 0, err
if allowList.IsPersist() {
if err := allowList.Add(c.req.Vars, c.req.Query, c.req.ref); err != nil {
return nil, nil, err
}
}
if err := tx.Commit(c); err != nil {
return nil, 0, err
}
if mutation {
st = findStmt(c.req.role, stmts)
} else {
st = &stmts[0]
}
if conf.EnableTracing && len(st.qc.Selects) != 0 {
c.addTrace(
st.qc.Selects,
st.qc.Selects[0].ID,
stime)
}
if conf.Production == false {
_allowList.add(&c.req)
}
return root, st.skipped, nil
}
func (c *coreContext) resolveRemote(
hdr http.Header,
h *xxhash.Digest,
field jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by what's below
toA := [1]jsn.Field{}
to := toA[:1]
// use the json key to find the related Select object
k1 := xxhash.Sum64(field.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Table, p.Table)
r, ok := rmap[k2]
if !ok {
return nil, nil
}
id := jsn.Value(field.Value)
if len(id) == 0 {
return nil, nil
}
st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
return nil, err
if len(stmts) > 1 {
if st = findStmt(role, stmts); st == nil {
return nil, nil, fmt.Errorf("invalid role '%s' returned", role)
}
}
if conf.EnableTracing {
c.addTrace(sel, s.ID, st)
for _, id := range st.qc.Roots {
c.addTrace(st.qc.Selects, id, stime)
}
}
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
return nil, err
}
} else {
ob.WriteString("null")
}
to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
return to, nil
}
func (c *coreContext) resolveRemotes(
hdr http.Header,
h *xxhash.Digest,
from []jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by whats below
to := make([]jsn.Field, len(from))
var wg sync.WaitGroup
wg.Add(len(from))
var cerr error
for i, id := range from {
// use the json key to find the related Select object
k1 := xxhash.Sum64(id.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Table, p.Table)
r, ok := rmap[k2]
if !ok {
return nil, nil
}
id := jsn.Value(id.Value)
if len(id) == 0 {
return nil, nil
}
go func(n int, id []byte, s *qcode.Select) {
defer wg.Done()
st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Table, err)
return
}
if conf.EnableTracing {
c.addTrace(sel, s.ID, st)
}
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Table, err)
return
}
} else {
ob.WriteString("null")
}
to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
}(i, id, s)
}
wg.Wait()
return to, cerr
return root, st, nil
}
func (c *coreContext) executeRoleQuery(tx pgx.Tx) (string, error) {
userID := c.Value(userIDKey)
if userID == nil {
return "anon", nil
}
var role string
row := tx.QueryRow(c, "_sg_get_role", c.req.role, 1)
row := tx.QueryRow(c.Context, "_sg_get_role", userID, c.req.role)
if err := row.Scan(&role); err != nil {
return "", err
@@ -421,15 +279,6 @@ func (c *coreContext) executeRoleQuery(tx pgx.Tx) (string, error) {
return role, nil
}
func (c *coreContext) setLocalUserID(tx pgx.Tx) error {
var err error
if v := c.Value(userIDKey); v != nil {
_, err = tx.Exec(c, fmt.Sprintf(`SET LOCAL "user.id" = %s;`, v))
}
return err
}
func (c *coreContext) render(w io.Writer, data []byte) error {
c.res.Data = json.RawMessage(data)
return json.NewEncoder(w).Encode(c.res)
@@ -451,15 +300,15 @@ func (c *coreContext) addTrace(sel []qcode.Select, id int32, st time.Time) {
c.res.Extensions.Tracing.Duration = du
n := 1
for i := id; i != 0; i = sel[i].ParentID {
for i := id; i != -1; i = sel[i].ParentID {
n++
}
path := make([]string, n)
n--
for i := id; ; i = sel[i].ParentID {
path[n] = sel[i].Table
if sel[i].ID == 0 {
path[n] = sel[i].Name
if sel[i].ParentID == -1 {
break
}
n--
@@ -468,7 +317,7 @@ func (c *coreContext) addTrace(sel []qcode.Select, id int32, st time.Time) {
tr := resolver{
Path: path,
ParentType: "Query",
FieldName: sel[id].Table,
FieldName: sel[id].Name,
ReturnType: "object",
StartOffset: 1,
Duration: du,
@@ -478,6 +327,15 @@ func (c *coreContext) addTrace(sel []qcode.Select, id int32, st time.Time) {
append(c.res.Extensions.Tracing.Execution.Resolvers, tr)
}
func setLocalUserID(c context.Context, tx pgx.Tx) error {
var err error
if v := c.Value(userIDKey); v != nil {
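// SET LOCAL is scoped to the current transaction in Postgres, which is why
// callers run this helper inside a tx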
_, err = tx.Exec(context.Background(), fmt.Sprintf(`SET LOCAL "user.id" = %s;`, v))
}
return err
}
func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
[][]byte,
map[uint64]*qcode.Select) {
@@ -502,12 +360,12 @@ func parentFieldIds(h *xxhash.Digest, sel []qcode.Select, skipped uint32) (
for i := range sel {
s := &sel[i]
if isSkipped(skipped, uint32(s.ID)) == false {
if !isSkipped(skipped, uint32(s.ID)) {
continue
}
p := sel[s.ParentID]
k := mkkey(h, s.Table, p.Table)
k := mkkey(h, s.Name, p.Name)
if r, ok := rmap[k]; ok {
fm[n] = r.IDField


@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/dosco/super-graph/psql"
@ -17,133 +18,170 @@ type stmt struct {
sql string
}
func (c *coreContext) buildStmt() ([]stmt, error) {
var vars map[string]json.RawMessage
func buildStmt(qt qcode.QType, gql, vars []byte, role string) ([]stmt, error) {
switch qt {
case qcode.QTMutation:
return buildRoleStmt(gql, vars, role)
if len(c.req.Vars) != 0 {
if err := json.Unmarshal(c.req.Vars, &vars); err != nil {
case qcode.QTQuery:
if role == "anon" {
return buildRoleStmt(gql, vars, "anon")
}
if conf.isABACEnabled() {
return buildMultiStmt(gql, vars)
}
return buildRoleStmt(gql, vars, "user")
default:
return nil, fmt.Errorf("unknown query type '%d'", qt)
}
}
func buildRoleStmt(gql, vars []byte, role string) ([]stmt, error) {
ro, ok := conf.roles[role]
if !ok {
return nil, fmt.Errorf(`role '%s' not defined in config`, role)
}
var vm map[string]json.RawMessage
var err error
if len(vars) != 0 {
if err := json.Unmarshal(vars, &vm); err != nil {
return nil, err
}
}
gql := []byte(c.req.Query)
if len(conf.Roles) == 0 {
return nil, errors.New(`no roles found ('user' and 'anon' required)`)
}
qc, err := qcompile.Compile(gql, conf.Roles[0].Name)
qc, err := qcompile.Compile(gql, ro.Name)
if err != nil {
return nil, err
}
stmts := make([]stmt, 0, len(conf.Roles))
mutation := (qc.Type != qcode.QTQuery)
// For the 'anon' role in production only compile
// queries for tables defined in the config file.
if conf.Production && ro.Name == "anon" && !hasTablesWithConfig(qc, ro) {
return nil, errors.New("query contains tables with no 'anon' role config")
}
stmts := []stmt{stmt{role: ro, qc: qc}}
w := &bytes.Buffer{}
for i := 1; i < len(conf.Roles); i++ {
skipped, err := pcompile.Compile(qc, w, psql.Variables(vm))
if err != nil {
return nil, err
}
stmts[0].skipped = skipped
stmts[0].sql = w.String()
return stmts, nil
}
func buildMultiStmt(gql, vars []byte) ([]stmt, error) {
var vm map[string]json.RawMessage
var err error
if len(vars) != 0 {
if err := json.Unmarshal(vars, &vm); err != nil {
return nil, err
}
}
if len(conf.RolesQuery) == 0 {
return buildRoleStmt(gql, vars, "user")
}
stmts := make([]stmt, 0, len(conf.Roles))
w := &bytes.Buffer{}
for i := 0; i < len(conf.Roles); i++ {
role := &conf.Roles[i]
// For mutations only render sql for a single role from the request
if mutation && len(c.req.role) != 0 && role.Name != c.req.role {
if role.Name == "anon" {
continue
}
qc, err = qcompile.Compile(gql, role.Name)
qc, err := qcompile.Compile(gql, role.Name)
if err != nil {
return nil, err
}
if conf.Production && role.Name == "anon" {
if _, ok := role.tablesMap[qc.Selects[0].Table]; !ok {
continue
}
}
stmts = append(stmts, stmt{role: role, qc: qc})
if mutation {
skipped, err := pcompile.Compile(qc, w, psql.Variables(vars))
if err != nil {
return nil, err
}
s := &stmts[len(stmts)-1]
s.skipped = skipped
s.sql = w.String()
w.Reset()
}
}
if mutation {
return stmts, nil
}
io.WriteString(w, `SELECT "_sg_auth_info"."role", (CASE "_sg_auth_info"."role" `)
for _, s := range stmts {
io.WriteString(w, `WHEN '`)
io.WriteString(w, s.role.Name)
io.WriteString(w, `' THEN (`)
s.skipped, err = pcompile.Compile(s.qc, w, psql.Variables(vars))
skipped, err := pcompile.Compile(qc, w, psql.Variables(vm))
if err != nil {
return nil, err
}
io.WriteString(w, `) `)
}
io.WriteString(w, `END) FROM (`)
if len(conf.RolesQuery) == 0 {
v := c.Value(userRoleKey)
io.WriteString(w, `VALUES ("`)
if v != nil {
io.WriteString(w, v.(string))
} else {
io.WriteString(w, c.req.role)
}
io.WriteString(w, `")) AS "_sg_auth_info"(role) LIMIT 1;`)
} else {
io.WriteString(w, `SELECT (CASE WHEN EXISTS (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) THEN `)
io.WriteString(w, `(SELECT (CASE`)
for _, s := range stmts {
if len(s.role.Match) == 0 {
continue
}
io.WriteString(w, ` WHEN `)
io.WriteString(w, s.role.Match)
io.WriteString(w, ` THEN '`)
io.WriteString(w, s.role.Name)
io.WriteString(w, `'`)
}
if len(c.req.role) == 0 {
io.WriteString(w, ` ELSE 'anon' END) FROM (`)
} else {
io.WriteString(w, ` ELSE '`)
io.WriteString(w, c.req.role)
io.WriteString(w, `' END) FROM (`)
}
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) ELSE '`)
if len(c.req.role) == 0 {
io.WriteString(w, `anon`)
} else {
io.WriteString(w, c.req.role)
}
io.WriteString(w, `' END) FROM (VALUES (1)) AS "_sg_auth_filler") AS "_sg_auth_info"(role) LIMIT 1; `)
s := &stmts[len(stmts)-1]
s.skipped = skipped
s.sql = w.String()
w.Reset()
}
stmts[0].sql = w.String()
stmts[0].role = nil
sql, err := renderUserQuery(stmts, vm)
if err != nil {
return nil, err
}
stmts[0].sql = sql
return stmts, nil
}
//nolint: errcheck
func renderUserQuery(
stmts []stmt, vars map[string]json.RawMessage) (string, error) {
w := &bytes.Buffer{}
io.WriteString(w, `SELECT "_sg_auth_info"."role", (CASE "_sg_auth_info"."role" `)
for _, s := range stmts {
if len(s.role.Match) == 0 &&
s.role.Name != "user" && s.role.Name != "anon" {
continue
}
io.WriteString(w, `WHEN '`)
io.WriteString(w, s.role.Name)
io.WriteString(w, `' THEN (`)
io.WriteString(w, s.sql)
io.WriteString(w, `) `)
}
io.WriteString(w, `END) FROM (SELECT (CASE WHEN EXISTS (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) THEN `)
io.WriteString(w, `(SELECT (CASE`)
for _, s := range stmts {
if len(s.role.Match) == 0 {
continue
}
io.WriteString(w, ` WHEN `)
io.WriteString(w, s.role.Match)
io.WriteString(w, ` THEN '`)
io.WriteString(w, s.role.Name)
io.WriteString(w, `'`)
}
io.WriteString(w, ` ELSE 'user' END) FROM (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler") AS "_sg_auth_info"(role) LIMIT 1; `)
return w.String(), nil
}
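// For reference, a hedged sketch of the SQL shape assembled above, assuming
// the sample config's roles_query and an 'admin' role matched on id = 1000.
// The per-role subqueries are placeholders, and {{user_id}} is shown already
// converted to a positional parameter (which really happens later, in
// processTemplate).
const exampleUserQuerySQL = `
SELECT "_sg_auth_info"."role", (CASE "_sg_auth_info"."role"
WHEN 'admin' THEN ( /* compiled admin query */ )
WHEN 'user' THEN ( /* compiled user query */ )
WHEN 'anon' THEN ( /* compiled anon query */ )
END) FROM (SELECT (CASE WHEN EXISTS (SELECT * FROM users WHERE id = $1) THEN
(SELECT (CASE WHEN id = 1000 THEN 'admin' ELSE 'user' END)
FROM (SELECT * FROM users WHERE id = $1) AS "_sg_auth_roles_query" LIMIT 1)
ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler") AS "_sg_auth_info"(role) LIMIT 1;`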
func hasTablesWithConfig(qc *qcode.QCode, role *configRole) bool {
for _, id := range qc.Roots {
t, err := schema.GetTable(qc.Selects[id].Name)
if err != nil {
return false
}
if _, ok := role.tablesMap[t.Name]; !ok {
return false
}
}
return true
}

serv/core_remote.go (new file, 197 lines)

@@ -0,0 +1,197 @@
package serv
import (
"bytes"
"errors"
"fmt"
"net/http"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
"github.com/dosco/super-graph/qcode"
)
func execRemoteJoin(st *stmt, data []byte, hdr http.Header) ([]byte, error) {
var err error
if len(data) == 0 || st.skipped == 0 {
return data, nil
}
sel := st.qc.Selects
h := xxhash.New()
// fetch the field names used within the db response json
// that mark the insertion points, and the mapping between
// those field names and their select objects
fids, sfmap := parentFieldIds(h, sel, st.skipped)
// fetch the field values of the marked insertion points;
// these values contain the id used to fetch the remote data
from := jsn.Get(data, fids)
var to []jsn.Field
switch {
case len(from) == 1:
to, err = resolveRemote(hdr, h, from[0], sel, sfmap)
case len(from) > 1:
to, err = resolveRemotes(hdr, h, from, sel, sfmap)
default:
return nil, errors.New("something wrong no remote ids found in db response")
}
if err != nil {
return nil, err
}
var ob bytes.Buffer
err = jsn.Replace(&ob, data, from, to)
if err != nil {
return nil, err
}
return ob.Bytes(), nil
}
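// A toy illustration of the splice performed above; the JSON, the synthetic
// insertion-point key, and the remote payload are all made up.
func exampleRemoteSplice() ([]byte, error) {
data := []byte(`{"customer":{"id":1,"__payments_customer_id":"cus_1"}}`)
// locate the insertion-point field by its synthetic key
from := jsn.Get(data, [][]byte{[]byte("__payments_customer_id")})
// replace its key and value with the remote result under the GraphQL field name
to := []jsn.Field{{Key: []byte("payments"), Value: []byte(`[{"amount":100}]`)}}
var ob bytes.Buffer
if err := jsn.Replace(&ob, data, from, to); err != nil {
return nil, err
}
// ob now holds {"customer":{"id":1,"payments":[{"amount":100}]}}
return ob.Bytes(), nil
}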
func resolveRemote(
hdr http.Header,
h *xxhash.Digest,
field jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by what's below
toA := [1]jsn.Field{}
to := toA[:1]
// use the json key to find the related Select object
k1 := xxhash.Sum64(field.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
r, ok := rmap[k2]
if !ok {
return nil, nil
}
id := jsn.Value(field.Value)
if len(id) == 0 {
return nil, nil
}
//st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
return nil, err
}
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
return nil, err
}
} else {
ob.WriteString("null")
}
to[0] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
return to, nil
}
func resolveRemotes(
hdr http.Header,
h *xxhash.Digest,
from []jsn.Field,
sel []qcode.Select,
sfmap map[uint64]*qcode.Select) ([]jsn.Field, error) {
// replacement data for the marked insertion points
// key and value will be replaced by what's below
to := make([]jsn.Field, len(from))
var wg sync.WaitGroup
wg.Add(len(from))
var cerr error
for i, id := range from {
// use the json key to find the related Select object
k1 := xxhash.Sum64(id.Key)
s, ok := sfmap[k1]
if !ok {
return nil, nil
}
p := sel[s.ParentID]
// then use the Table name in the Select and its parent
// to find the resolver to use for this relationship
k2 := mkkey(h, s.Name, p.Name)
r, ok := rmap[k2]
if !ok {
return nil, nil
}
id := jsn.Value(id.Value)
if len(id) == 0 {
return nil, nil
}
go func(n int, id []byte, s *qcode.Select) {
defer wg.Done()
//st := time.Now()
b, err := r.Fn(hdr, id)
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Name, err)
return
}
if len(r.Path) != 0 {
b = jsn.Strip(b, r.Path)
}
var ob bytes.Buffer
if len(s.Cols) != 0 {
err = jsn.Filter(&ob, b, colsToList(s.Cols))
if err != nil {
cerr = fmt.Errorf("%s: %s", s.Name, err)
return
}
} else {
ob.WriteString("null")
}
to[n] = jsn.Field{Key: []byte(s.FieldName), Value: ob.Bytes()}
}(i, id, s)
}
wg.Wait()
return to, cerr
}


@@ -4,7 +4,7 @@ package serv
func Fuzz(data []byte) int {
gql := string(data)
isMutation(gql)
QueryName(gql)
gqlHash(gql, nil, "")
return 1


@@ -10,7 +10,6 @@ func TestFuzzCrashers(t *testing.T) {
}
for _, f := range crashers {
isMutation(f)
gqlHash(f, nil, "")
}
}

serv/health.go (new file, 32 lines)

@@ -0,0 +1,32 @@
package serv
import (
"context"
"net/http"
)
var healthyResponse = []byte("All's Well")
func health(w http.ResponseWriter, _ *http.Request) {
conn, err := db.Acquire(context.Background())
if err != nil {
errlog.Error().Err(err).Msg("error acquiring connection from pool")
w.WriteHeader(http.StatusInternalServerError)
return
}
ctx, cancel := context.WithTimeout(context.Background(), conf.DB.PingTimeout)
defer cancel()
if err := conn.Conn().Ping(ctx); err != nil {
errlog.Error().Err(err).Msg("error pinging database")
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := w.Write(healthyResponse); err != nil {
errlog.Error().Err(err).Msg("error writing healthy response")
w.WriteHeader(http.StatusInternalServerError)
return
}
}
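// A hedged client-side probe of the endpoint above; the host and port assume
// the sample config's host_port.
func exampleHealthProbe() (bool, error) {
res, err := http.Get("http://localhost:8080/health")
if err != nil {
return false, err
}
defer res.Body.Close()
// expect 200 with the "All's Well" body once the db answers the ping
return res.StatusCode == http.StatusOK, nil
}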


@@ -8,8 +8,6 @@ import (
"net/http"
"strings"
"time"
"github.com/gorilla/websocket"
)
const (
@@ -20,8 +18,6 @@
)
var (
upgrader = websocket.Upgrader{}
errNoUserID = errors.New("no user_id available")
errUnauthorized = errors.New("not authorized")
)
@@ -34,8 +30,6 @@ type gqlReq struct {
hdr http.Header
}
type variables map[string]json.RawMessage
type gqlResp struct {
Error string `json:"message,omitempty"`
Data json.RawMessage `json:"data,omitempty"`
@@ -67,28 +61,27 @@ type resolver struct {
Duration time.Duration `json:"duration"`
}
func apiv1Http(w http.ResponseWriter, r *http.Request) {
func apiV1(w http.ResponseWriter, r *http.Request) {
ctx := &coreContext{Context: r.Context()}
if conf.AuthFailBlock && authCheck(ctx) == false {
//nolint: errcheck
if conf.AuthFailBlock && !authCheck(ctx) {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(gqlResp{Error: errUnauthorized.Error()})
return
}
b, err := ioutil.ReadAll(io.LimitReader(r.Body, maxReadBytes))
defer r.Body.Close()
if err != nil {
logger.Err(err).Msg("failed to read request body")
errlog.Error().Err(err).Msg("failed to read request body")
errorResp(w, err)
return
}
defer r.Body.Close()
err = json.Unmarshal(b, &ctx.req)
if err != nil {
logger.Err(err).Msg("failed to decode json request body")
errlog.Error().Err(err).Msg("failed to decode json request body")
errorResp(w, err)
return
}
@@ -100,6 +93,7 @@ func apiv1Http(w http.ResponseWriter, r *http.Request) {
err = ctx.handleReq(w, r)
//nolint: errcheck
if err == errUnauthorized {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(gqlResp{Error: err.Error()})
@@ -107,12 +101,13 @@ func apiv1Http(w http.ResponseWriter, r *http.Request) {
}
if err != nil {
logger.Err(err).Msg("failed to handle request")
errlog.Error().Err(err).Msg("failed to handle request")
errorResp(w, err)
return
}
}
//nolint: errcheck
func errorResp(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(gqlResp{Error: err.Error()})
}
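// A hedged sketch of a client call to the handler above, assuming the usual
// {"query": ..., "variables": ...} request body that gqlReq is decoded from;
// the query, host, and port are illustrative.
func exampleAPIV1Call() (*http.Response, error) {
body := `{"query":"query { users { id } }","variables":{"id":1}}`
return http.Post("http://localhost:8080/api/v1/graphql",
"application/json", strings.NewReader(body))
}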

serv/init.go (new file, 165 lines)

@@ -0,0 +1,165 @@
package serv
import (
"context"
"fmt"
"os"
"github.com/dosco/super-graph/allow"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/rs/zerolog"
)
func initLog() {
out := zerolog.ConsoleWriter{Out: os.Stderr}
logger = zerolog.New(out).With().Timestamp().Logger()
errlog = logger.With().Caller().Logger()
}
func initConf() (*config, error) {
vi := newConfig(getConfigName())
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
inherits := vi.GetString("inherits")
if len(inherits) != 0 {
vi = newConfig(inherits)
if err := vi.ReadInConfig(); err != nil {
return nil, err
}
if vi.IsSet("inherits") {
errlog.Fatal().Msgf("inherited config (%s) cannot itself inherit (%s)",
inherits,
vi.GetString("inherits"))
}
vi.SetConfigName(getConfigName())
if err := vi.MergeInConfig(); err != nil {
return nil, err
}
}
c := &config{}
if err := c.Init(vi); err != nil {
return nil, fmt.Errorf("unable to decode config, %v", err)
}
logLevel, err := zerolog.ParseLevel(c.LogLevel)
if err != nil {
errlog.Error().Err(err).Msg("error setting log_level")
}
zerolog.SetGlobalLevel(logLevel)
return c, nil
}
func initDB(c *config, useDB bool) (*pgx.Conn, error) {
config, _ := pgx.ParseConfig("")
config.Host = c.DB.Host
config.Port = c.DB.Port
config.User = c.DB.User
config.Password = c.DB.Password
config.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
if useDB {
config.Database = c.DB.DBName
}
switch c.LogLevel {
case "debug":
config.LogLevel = pgx.LogLevelDebug
case "info":
config.LogLevel = pgx.LogLevelInfo
case "warn":
config.LogLevel = pgx.LogLevelWarn
case "error":
config.LogLevel = pgx.LogLevelError
default:
config.LogLevel = pgx.LogLevelNone
}
config.Logger = NewSQLLogger(logger)
db, err := pgx.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initDBPool(c *config) (*pgxpool.Pool, error) {
config, _ := pgxpool.ParseConfig("")
config.ConnConfig.Host = c.DB.Host
config.ConnConfig.Port = c.DB.Port
config.ConnConfig.Database = c.DB.DBName
config.ConnConfig.User = c.DB.User
config.ConnConfig.Password = c.DB.Password
config.ConnConfig.RuntimeParams = map[string]string{
"application_name": c.AppName,
"search_path": c.DB.Schema,
}
switch c.LogLevel {
case "debug":
config.ConnConfig.LogLevel = pgx.LogLevelDebug
case "info":
config.ConnConfig.LogLevel = pgx.LogLevelInfo
case "warn":
config.ConnConfig.LogLevel = pgx.LogLevelWarn
case "error":
config.ConnConfig.LogLevel = pgx.LogLevelError
default:
config.ConnConfig.LogLevel = pgx.LogLevelNone
}
config.ConnConfig.Logger = NewSQLLogger(logger)
// if c.DB.MaxRetries != 0 {
// opt.MaxRetries = c.DB.MaxRetries
// }
if c.DB.PoolSize != 0 {
config.MaxConns = conf.DB.PoolSize
}
db, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
return nil, err
}
return db, nil
}
func initCompiler() {
var err error
qcompile, pcompile, err = initCompilers(conf)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize compilers")
}
}
func initAllowList(cpath string) {
var ac allow.Config
var err error
if !conf.Production {
ac = allow.Config{CreateIfNotExists: true, Persist: true}
}
allowList, err = allow.New(cpath, ac)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize allow list")
}
}


@@ -2,6 +2,7 @@ package serv
import "net/http"
//nolint: errcheck
func introspect(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{


@@ -3,111 +3,186 @@ package serv
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"github.com/dosco/super-graph/allow"
"github.com/dosco/super-graph/qcode"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/valyala/fasttemplate"
)
type preparedItem struct {
stmt *pgconn.StatementDescription
sd *pgconn.StatementDescription
args [][]byte
skipped uint32
qc *qcode.QCode
st stmt
roleArg bool
}
var (
_preparedList map[string]*preparedItem
)
func initPreparedList() {
func initPreparedList(cpath string) {
if allowList.IsPersist() {
return
}
_preparedList = make(map[string]*preparedItem)
if err := prepareRoleStmt(); err != nil {
logger.Fatal().Err(err).Msg("failed to prepare get role statement")
}
for _, v := range _allowList.list {
err := prepareStmt(v.gql, v.vars)
if err != nil {
logger.Warn().Str("gql", v.gql).Err(err).Send()
}
}
}
func prepareStmt(gql string, varBytes json.RawMessage) error {
if len(gql) == 0 {
return nil
}
c := &coreContext{Context: context.Background()}
c.req.Query = gql
c.req.Vars = varBytes
stmts, err := c.buildStmt()
tx, err := db.Begin(context.Background())
if err != nil {
return err
errlog.Fatal().Err(err).Send()
}
defer tx.Rollback(context.Background()) //nolint: errcheck
err = prepareRoleStmt(tx)
if err != nil {
errlog.Fatal().Err(err).Msg("failed to prepare get role statement")
}
if len(stmts) != 0 && stmts[0].qc.Type == qcode.QTQuery {
c.req.Vars = nil
if err := tx.Commit(context.Background()); err != nil {
errlog.Fatal().Err(err).Send()
}
for _, s := range stmts {
if len(s.sql) == 0 {
success := 0
list, err := allowList.Load()
if err != nil {
errlog.Fatal().Err(err).Send()
}
for _, v := range list {
if len(v.Query) == 0 {
continue
}
finalSQL, am := processTemplate(s.sql)
ctx := context.Background()
tx, err := db.Begin(ctx)
if err != nil {
return err
}
defer tx.Rollback(ctx)
pstmt, err := tx.Prepare(ctx, "", finalSQL)
if err != nil {
return err
err := prepareStmt(v)
if err == nil {
success++
continue
}
var key string
if s.role == nil {
key = gqlHash(gql, c.req.Vars, "")
if len(v.Vars) == 0 {
logger.Warn().Err(err).Msg(v.Query)
} else {
key = gqlHash(gql, c.req.Vars, s.role.Name)
logger.Warn().Err(err).Msgf("%s %s", v.Vars, v.Query)
}
}
logger.Info().
Msgf("Registered %d of %d queries from allow.list as prepared statements",
success, len(list))
}
func prepareStmt(item allow.Item) error {
gql := item.Query
vars := item.Vars
qt := qcode.GetQType(gql)
q := []byte(gql)
if len(vars) == 0 {
logger.Debug().Msgf("Prepared statement:\n%s\n", gql)
} else {
logger.Debug().Msgf("Prepared statement:\n%s\n%s\n", vars, gql)
}
tx, err := db.Begin(context.Background())
if err != nil {
return err
}
defer tx.Rollback(context.Background()) //nolint: errcheck
switch qt {
case qcode.QTQuery:
var stmts1 []stmt
var err error
if conf.isABACEnabled() {
stmts1, err = buildMultiStmt(q, vars)
} else {
stmts1, err = buildRoleStmt(q, vars, "user")
}
_preparedList[key] = &preparedItem{
stmt: pstmt,
args: am,
skipped: s.skipped,
qc: s.qc,
}
if err := tx.Commit(ctx); err != nil {
if err != nil {
return err
}
logger.Debug().Msg("Prepared statement role: user")
err = prepare(tx, stmts1, stmtHash(item.Name, "user"))
if err != nil {
return err
}
if conf.isAnonRoleDefined() {
logger.Debug().Msg("Prepared statement for role: anon")
stmts2, err := buildRoleStmt(q, vars, "anon")
if err != nil {
return err
}
err = prepare(tx, stmts2, stmtHash(item.Name, "anon"))
if err != nil {
return err
}
}
case qcode.QTMutation:
for _, role := range conf.Roles {
logger.Debug().Msgf("Prepared statement for role: %s", role.Name)
stmts, err := buildRoleStmt(q, vars, role.Name)
if err != nil {
return err
}
err = prepare(tx, stmts, stmtHash(item.Name, role.Name))
if err != nil {
return err
}
}
}
if err := tx.Commit(context.Background()); err != nil {
return err
}
return nil
}
func prepareRoleStmt() error {
if len(conf.RolesQuery) == 0 {
func prepare(tx pgx.Tx, st []stmt, key string) error {
finalSQL, am := processTemplate(st[0].sql)
sd, err := tx.Prepare(context.Background(), "", finalSQL)
if err != nil {
return err
}
_preparedList[key] = &preparedItem{
sd: sd,
args: am,
st: st[0],
roleArg: len(st) > 1,
}
return nil
}
// nolint: errcheck
func prepareRoleStmt(tx pgx.Tx) error {
if !conf.isABACEnabled() {
return nil
}
w := &bytes.Buffer{}
io.WriteString(w, `SELECT (CASE`)
io.WriteString(w, `SELECT (CASE WHEN EXISTS (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) THEN `)
io.WriteString(w, `(SELECT (CASE`)
for _, role := range conf.Roles {
if len(role.Match) == 0 {
continue
@@ -121,19 +196,12 @@ func prepareRoleStmt() error {
io.WriteString(w, ` ELSE {{role}} END) FROM (`)
io.WriteString(w, conf.RolesQuery)
io.WriteString(w, `) AS "_sg_auth_roles_query"`)
io.WriteString(w, `) AS "_sg_auth_roles_query" LIMIT 1) `)
io.WriteString(w, `ELSE 'anon' END) FROM (VALUES (1)) AS "_sg_auth_filler" LIMIT 1; `)
roleSQL, _ := processTemplate(w.String())
ctx := context.Background()
tx, err := db.Begin(ctx)
if err != nil {
return err
}
defer tx.Rollback(ctx)
_, err = tx.Prepare(ctx, "_sg_get_role", roleSQL)
_, err := tx.Prepare(context.Background(), "_sg_get_role", roleSQL)
if err != nil {
return err
}
@@ -142,19 +210,31 @@ func prepareRoleStmt() error {
}
func processTemplate(tmpl string) (string, [][]byte) {
t := fasttemplate.New(tmpl, `{{`, `}}`)
am := make([][]byte, 0, 5)
i := 0
st := struct {
vmap map[string]int
am [][]byte
i int
}{
vmap: make(map[string]int),
am: make([][]byte, 0, 5),
i: 0,
}
vmap := make(map[string]int)
return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {
if n, ok := vmap[tag]; ok {
execFunc := func(w io.Writer, tag string) (int, error) {
if n, ok := st.vmap[tag]; ok {
return w.Write([]byte(fmt.Sprintf("$%d", n)))
}
am = append(am, []byte(tag))
i++
vmap[tag] = i
return w.Write([]byte(fmt.Sprintf("$%d", i)))
}), am
st.am = append(st.am, []byte(tag))
st.i++
st.vmap[tag] = st.i
return w.Write([]byte(fmt.Sprintf("$%d", st.i)))
}
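// Two passes follow: the first consumes quoted tags like '{{user_id}}' so the
// wrapping single quotes are replaced along with the tag; the second handles
// any remaining bare {{user_id}} occurrences. Both passes share vmap, so a tag
// keeps the same positional number wherever it appears.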
t1 := fasttemplate.New(tmpl, `'{{`, `}}'`)
ts1 := t1.ExecuteFuncString(execFunc)
t2 := fasttemplate.New(ts1, `{{`, `}}`)
ts2 := t2.ExecuteFuncString(execFunc)
return ts2, st.am
}
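// A hedged usage sketch of processTemplate; the template is illustrative and
// the expected results are shown inline.
//
//	sql, args := processTemplate(`SELECT * FROM users WHERE id = '{{user_id}}' AND role = {{role}}`)
//	// sql  => SELECT * FROM users WHERE id = $1 AND role = $2
//	// args => [][]byte{[]byte("user_id"), []byte("role")}
//	// a repeated tag reuses its first positional number via vmap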


@@ -108,7 +108,7 @@ func Do(log func(string, ...interface{}), additional ...dir) error {
// Ensure that we use the correct events, as they are not uniform across
// platforms. See https://github.com/fsnotify/fsnotify/issues/74
if conf.Production == false && strings.HasSuffix(event.Name, "/allow.list") {
if conf != nil && !conf.Production && strings.HasSuffix(event.Name, "/allow.list") {
continue
}
@@ -168,7 +168,7 @@ func Do(log func(string, ...interface{}), additional ...dir) error {
func ReExec() {
err := syscall.Exec(binSelf, append([]string{binSelf}, os.Args[1:]...), os.Environ())
if err != nil {
logger.Fatal().Err(err).Msg("cannot restart")
errlog.Fatal().Err(err).Msg("cannot restart")
}
}

View File

@@ -22,21 +22,24 @@ type resolvFn struct {
Fn func(h http.Header, id []byte) ([]byte, error)
}
func initResolvers() error {
func initResolvers() {
var err error
rmap = make(map[uint64]*resolvFn)
for _, t := range conf.Tables {
err := initRemotes(t)
err = initRemotes(t)
if err != nil {
return err
break
}
}
return nil
if err != nil {
errlog.Fatal().Err(err).Msg("failed to initialize resolvers")
}
}
func initRemotes(t configTable) error {
h := xxhash.New()
var err error
for _, r := range t.Remotes {
// defines the table column to be used as an id in the
@@ -46,21 +49,20 @@ func initRemotes(t configTable) error {
// if no table column specified in the config then
// use the primary key of the table as the id
if len(idcol) == 0 {
idcol, err = pcompile.IDColumn(t.Name)
pcol, err := pcompile.IDColumn(t.Name)
if err != nil {
return err
}
idcol = pcol.Key
}
idk := fmt.Sprintf("__%s_%s", t.Name, idcol)
// register a relationship between the remote data
// and the database table
val := &psql.DBRel{
Type: psql.RelRemote,
Col1: idcol,
Col2: idk,
}
val := &psql.DBRel{Type: psql.RelRemote}
val.Left.Col = idcol
val.Right.Col = idk
err := pcompile.AddRelationship(strings.ToLower(r.Name), t.Name, val)
if err != nil {
@@ -103,6 +105,10 @@ func buildFn(r configRemote) func(http.Header, []byte) ([]byte, error) {
return nil, err
}
if host, ok := hdr["Host"]; ok {
req.Host = host[0]
}
for _, v := range r.SetHeaders {
req.Header.Set(v.Name, v.Value)
}
@@ -111,13 +117,11 @@ func buildFn(r configRemote) func(http.Header, []byte) ([]byte, error) {
req.Header.Set(v, hdr.Get(v))
}
if host, ok := hdr["Host"]; ok {
req.Host = host[0]
}
logger.Debug().Str("uri", uri).Msg("Remote Join")
res, err := client.Do(req)
if err != nil {
logger.Error().Err(err).Msgf("Failed to connect to: %s", uri)
errlog.Error().Err(err).Msgf("Failed to connect to: %s", uri)
return nil, err
}
defer res.Body.Close()
@@ -133,7 +137,7 @@ func buildFn(r configRemote) func(http.Header, []byte) ([]byte, error) {
return nil, err
}
logger.Warn().Msgf("Remote Request Debug:\n%s\n%s",
logger.Debug().Msgf("Remote Request Debug:\n%s\n%s",
reqDump, resDump)
}

serv/rice-box.go (new file, 338 lines)

File diff suppressed because one or more lines are too long


@@ -10,77 +10,39 @@ import (
"time"
rice "github.com/GeertJohan/go.rice"
"github.com/NYTimes/gziphandler"
"github.com/dosco/super-graph/psql"
"github.com/dosco/super-graph/qcode"
)
func initCompilers(c *config) (*qcode.Compiler, *psql.Compiler, error) {
schema, err := psql.NewDBSchema(db, c.getAliasMap())
di, err := psql.GetDBInfo(db)
if err != nil {
return nil, nil, err
}
conf := qcode.Config{
Blocklist: c.DB.Defaults.Blocklist,
KeepArgs: false,
if err = addTables(c, di); err != nil {
return nil, nil, err
}
qc, err := qcode.NewCompiler(conf)
if err = addForeignKeys(c, di); err != nil {
return nil, nil, err
}
schema, err = psql.NewDBSchema(di, c.getAliasMap())
if err != nil {
return nil, nil, err
}
blockFilter := []string{"false"}
qc, err := qcode.NewCompiler(qcode.Config{
Blocklist: c.DB.Blocklist,
})
if err != nil {
return nil, nil, err
}
for _, r := range c.Roles {
for _, t := range r.Tables {
query := qcode.QueryConfig{
Limit: t.Query.Limit,
Filters: t.Query.Filters,
Columns: t.Query.Columns,
DisableFunctions: t.Query.DisableFunctions,
}
if t.Query.Block {
query.Filters = blockFilter
}
insert := qcode.InsertConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
Presets: t.Insert.Presets,
}
if t.Query.Block {
insert.Filters = blockFilter
}
update := qcode.UpdateConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
Presets: t.Insert.Presets,
}
if t.Query.Block {
update.Filters = blockFilter
}
delete := qcode.DeleteConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
}
if t.Query.Block {
delete.Filters = blockFilter
}
qc.AddRole(r.Name, t.Name, qcode.TRConfig{
Query: query,
Insert: insert,
Update: update,
Delete: delete,
})
}
if err := addRoles(c, qc); err != nil {
return nil, nil, err
}
pc := psql.NewCompiler(psql.Config{
@@ -92,7 +54,7 @@ func initCompilers(c *config) (*qcode.Compiler, *psql.Compiler, error) {
}
func initWatcher(cpath string) {
if conf.WatchAndReload == false {
if conf != nil && !conf.WatchAndReload {
return
}
@@ -106,27 +68,47 @@ func initWatcher(cpath string) {
go func() {
err := Do(logger.Printf, d)
if err != nil {
logger.Fatal().Err(err).Send()
errlog.Fatal().Err(err).Send()
}
}()
}
func startHTTP() {
hp := strings.SplitN(conf.HostPort, ":", 2)
var hostPort string
var appName string
if len(conf.Host) != 0 {
hp[0] = conf.Host
defaultHP := "0.0.0.0:8080"
env := os.Getenv("GO_ENV")
if conf != nil {
appName = conf.AppName
hp := strings.SplitN(conf.HostPort, ":", 2)
if len(hp) == 2 {
if len(conf.Host) != 0 {
hp[0] = conf.Host
}
if len(conf.Port) != 0 {
hp[1] = conf.Port
}
hostPort = fmt.Sprintf("%s:%s", hp[0], hp[1])
}
}
if len(conf.Port) != 0 {
hp[1] = conf.Port
if len(hostPort) == 0 {
hostPort = defaultHP
}
hostPort := fmt.Sprintf("%s:%s", hp[0], hp[1])
routes, err := routeHandler()
if err != nil {
errlog.Fatal().Err(err).Send()
}
srv := &http.Server{
Addr: hostPort,
Handler: routeHandler(),
Handler: routes,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
@@ -139,7 +121,7 @@ func startHTTP() {
<-sigint
if err := srv.Shutdown(context.Background()); err != nil {
logger.Error().Err(err).Msg("shutdown signal received")
errlog.Error().Err(err).Msg("shutdown signal received")
}
close(idleConnsClosed)
}()
@@ -148,29 +130,50 @@ func startHTTP() {
db.Close()
})
var ident string
if len(conf.AppName) == 0 {
ident = conf.Env
} else {
ident = conf.AppName
}
fmt.Printf("%s listening on %s (%s)\n", serverName, hostPort, ident)
logger.Info().
Str("version", version).
Str("git_branch", gitBranch).
Str("host_post", hostPort).
Str("app_name", appName).
Str("env", env).
Msgf("%s listening", serverName)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
logger.Error().Err(err).Msg("server closed")
errlog.Error().Err(err).Msg("server closed")
}
<-idleConnsClosed
}
func routeHandler() http.Handler {
func routeHandler() (http.Handler, error) {
mux := http.NewServeMux()
mux.Handle("/api/v1/graphql", withAuth(apiv1Http))
if conf == nil {
return mux, nil
}
routes := map[string]http.Handler{
"/health": http.HandlerFunc(health),
"/api/v1/graphql": withAuth(http.HandlerFunc(apiV1), conf.Auth),
}
if err := setActionRoutes(routes); err != nil {
return nil, err
}
if conf.WebUI {
mux.Handle("/", http.FileServer(rice.MustFindBox("../web/build").HTTPBox()))
routes["/"] = http.FileServer(rice.MustFindBox("../web/build").HTTPBox())
}
if conf.HTTPGZip {
gz := gziphandler.MustNewGzipLevelHandler(6)
for k, v := range routes {
routes[k] = gz(v)
}
}
for k, v := range routes {
mux.Handle(k, v)
}
fn := func(w http.ResponseWriter, r *http.Request) {
@@ -178,29 +181,38 @@ func routeHandler() http.Handler {
mux.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
return http.HandlerFunc(fn), nil
}
func getConfigName() string {
if len(os.Getenv("GO_ENV")) == 0 {
return "dev"
func setActionRoutes(routes map[string]http.Handler) error {
var err error
for _, a := range conf.Actions {
var fn http.Handler
fn, err = newAction(a)
if err != nil {
break
}
p := fmt.Sprintf("/api/v1/actions/%s", strings.ToLower(a.Name))
if authc, ok := findAuth(a.AuthName); ok {
routes[p] = withAuth(fn, authc)
} else {
routes[p] = fn
}
}
ge := strings.ToLower(os.Getenv("GO_ENV"))
switch {
case strings.HasPrefix(ge, "pro"):
return "prod"
case strings.HasPrefix(ge, "sta"):
return "stage"
case strings.HasPrefix(ge, "tes"):
return "test"
case strings.HasPrefix(ge, "dev"):
return "dev"
}
return ge
return nil
}
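// A hedged sketch of calling an action registered above; the action name is
// taken from the sample config, while the method, host, and port are assumptions.
func exampleCallAction() error {
req, err := http.NewRequest("GET",
"http://localhost:8080/api/v1/actions/refresh_leaderboard_users", nil)
if err != nil {
return err
}
// the named 'from_taskqueue' header auth only checks that this header exists
req.Header.Set("X-Appengine-Cron", "true")
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
return res.Body.Close()
}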
func findAuth(name string) (configAuth, bool) {
var authc configAuth
for _, a := range conf.Auths {
if strings.EqualFold(a.Name, name) {
return a, true
}
}
return authc, false
}


@@ -28,9 +28,7 @@ func (pl *Logger) Log(ctx context.Context, level pgx.LogLevel, msg string, data
zlevel = zerolog.ErrorLevel
case pgx.LogLevelWarn:
zlevel = zerolog.WarnLevel
case pgx.LogLevelInfo:
zlevel = zerolog.InfoLevel
case pgx.LogLevelDebug:
case pgx.LogLevelDebug, pgx.LogLevelInfo:
zlevel = zerolog.DebugLevel
default:
zlevel = zerolog.DebugLevel


@@ -7,11 +7,13 @@ import (
"io"
"sort"
"strings"
"sync"
"github.com/cespare/xxhash/v2"
"github.com/dosco/super-graph/jsn"
)
// nolint: errcheck
func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
h.WriteString(k1)
h.WriteString(k2)
@@ -21,6 +23,15 @@ func mkkey(h *xxhash.Digest, k1 string, k2 string) uint64 {
return v
}
// nolint: errcheck
func stmtHash(name string, role string) string {
h := sha1.New()
io.WriteString(h, strings.ToLower(name))
io.WriteString(h, role)
return hex.EncodeToString(h.Sum(nil))
}
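// Illustrative only: the key is the hex-encoded sha1 of the lower-cased query
// name plus the role, so _preparedList lookups are per name-and-role pair, e.g.
//
//	stmtHash("getUsers", "user") == stmtHash("GETUSERS", "user")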
// nolint: errcheck
func gqlHash(b string, vars []byte, role string) string {
b = strings.TrimSpace(b)
h := sha1.New()
@@ -64,7 +75,7 @@ func gqlHash(b string, vars []byte, role string) string {
} else {
starting = false
s = e
for e < len(b) && ws(b[e]) == false {
for e < len(b) && !ws(b[e]) {
e++
}
if e != 0 {
@@ -81,7 +92,7 @@ func gqlHash(b string, vars []byte, role string) string {
io.WriteString(h, role)
}
if vars == nil || len(vars) == 0 {
if len(vars) == 0 {
return hex.EncodeToString(h.Sum(nil))
}
@@ -106,19 +117,6 @@ func al(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || (b >= '0' && b <= '9')
}
func isMutation(sql string) bool {
for i := range sql {
b := sql[i]
if b == '{' {
return false
}
if al(b) {
return (b == 'm' || b == 'M')
}
}
return false
}
func findStmt(role string, stmts []stmt) *stmt {
for i := range stmts {
if stmts[i].role.Name != role {
@@ -128,3 +126,16 @@ func findStmt(role string, stmts []stmt) *stmt {
}
return nil
}
func fatalInProd(err error, msg string) {
var wg sync.WaitGroup
if !isDev() {
errlog.Fatal().Err(err).Msg(msg)
}
errlog.Error().Err(err).Msg(msg)
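// in dev the error is only logged; the Add below has no matching Done, so
// Wait blocks this goroutine forever, keeping the process alive (presumably
// so the reload watcher can restart it once the code is fixed)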
wg.Add(1)
wg.Wait()
}


@@ -77,7 +77,7 @@
database:
variables:
account_id: "select account_id from users where id = $user_id"
admin_account_id: "5"
defaults:
Filters: ["{ user_id: { eq: $user_id } }"]


@@ -3,7 +3,10 @@ host_port: 0.0.0.0:8080
web_ui: true
# debug, info, warn, error, fatal, panic
log_level: "debug"
log_level: "info"
# enable or disable http compression (uses gzip)
http_compress: true
# When production mode is 'true' only queries
# from the allow list are permitted.
@@ -46,7 +49,7 @@ migrations_path: ./config/migrations
# sheep: sheep
auth:
# Can be 'rails' or 'jwt'
# Can be 'rails', 'jwt' or 'header'
type: rails
cookie: _{% app_name_slug %}_session
@@ -80,6 +83,22 @@ auth:
# public_key_file: /secrets/public_key.pem
# public_key_type: ecdsa #rsa
# header:
# name: dnt
# exists: true
# value: localhost:8080
# You can add additional named auths to use with actions
# In this example, actions using this auth can only be
# called from the Google App Engine Cron service, which
# sets a special header on all its requests
auths:
- name: from_taskqueue
type: header
header:
name: X-Appengine-Cron
exists: true
database:
type: postgres
host: db
@@ -97,23 +116,31 @@ database:
# Enable this if you need the user id in triggers, etc
set_user_id: false
# Define variables here that you want to use in filters
# sub-queries must be wrapped in ()
# database ping timeout is used for db health checking
ping_timeout: 1m
# Define additional variables here to be used with filters
variables:
account_id: "(select account_id from users where id = $user_id)"
admin_account_id: "5"
# Define defaults for the field keys and values below
defaults:
# filters: ["{ user_id: { eq: $user_id } }"]
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Field and table names that you wish to block
blocklist:
- ar_internal_metadata
- schema_migrations
- secret
- password
- encrypted
- token
# Create custom actions with their own api endpoints
# For example, the action below will be available at /api/v1/actions/refresh_leaderboard_users
# A request to this url will execute the configured SQL query
# which in this case refreshes a materialized view in the database.
# The auth_name is from one of the configured auths
actions:
- name: refresh_leaderboard_users
sql: REFRESH MATERIALIZED VIEW CONCURRENTLY "leaderboard_users"
auth_name: from_taskqueue
tables:
- name: customers
@@ -136,6 +163,7 @@ tables:
name: me
table: users
roles_query: "SELECT * FROM users WHERE id = $user_id"
roles:
@@ -167,29 +195,24 @@ roles:
query:
limit: 50
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description" ]
disable_functions: false
insert:
filters: ["{ user_id: { eq: $user_id } }"]
columns: ["id", "name", "description" ]
presets:
- user_id: "$user_id"
- created_at: "now"
update:
filters: ["{ user_id: { eq: $user_id } }"]
columns:
- id
- name
presets:
- updated_at: "now"
delete:
deny: true
block: true
- name: admin
match: id = 1
match: id = 1000
tables:
- name: users
# query:
# filters: ["{ account_id: { _eq: $account_id } }"]
filters: []


@@ -1,4 +1,4 @@
version: '3'
version: '3.4'
services:
db:
image: postgres

Some files were not shown because too many files have changed in this diff.