Compare commits
83 Commits
0fc1236266
52ee2cf6a3
50ba6732fe
a8ad87115e
bcc443bd59
4e7f938f53
4683936de0
6f1a2acf96
dbcc51c462
4ddce9f804
d5dcff2810
20ddfb26f3
d715564833
63afefa9e8
9b63f01905
7243e9ef34
7e4d14fbca
527ea5044d
1d651dd7ed
ecec2968e9
5e86b445c5
18ce030056
1a949859e9
58eb272df0
78dbe32bd4
65921d4d42
fe4d1107ac
e765176bfa
9613a0153b
c0a21e448f
5b9105ff0c
72f8650ac0
aa9a4e3dc4
716a6ba74b
6fbc8587b2
4cba698f4a
08f878f6c2
cf0e4d10fd
dd085fb7fd
2d8fc2b7e2
4c07ad1102
cd6d6f97a8
45c283f2a7
17ad74b4fc
6b90b94e07
340dea242d
9af320f396
9aa4928d7b
a25fb16507
0b7512cf94
7643e6ffa7
661c822219
5f6ea226a3
6d370029e9
2f55131315
8b06473e58
f9fc5dd7de
77e56643c5
7c704637e8
58408eadc1
e1d3bb9055
f16e95ef22
6c9accb628
1e78491cb2
b405dafb89
7c37a1ef63
5edcf7b3a9
74ea365c5a
c03f7d6576
b666876064
652b31ce38
0755ecf6bd
7f8ab26218
bdf76fcd5e
de52ce4738
6b164aed08
acadef1816
5afed8dd54
6b78f31fd8
710a8c1115
5bd2f5a339
dd3132bda0
40fb85c926
@@ -1,4 +1,4 @@
example
demo
tmp
*.md
web/build

3 .gitignore vendored
@@ -21,9 +21,10 @@
/Godeps
/tmp
/tmp/runner-build
/example/tmp
/demo/tmp

.vscode
main
.DS_Store
.swp
main
27 Dockerfile
@@ -6,13 +6,14 @@ RUN yarn
RUN yarn build

# stage: 2
FROM golang:1.12-alpine as go-build
FROM golang:1.13beta1-alpine as go-build
RUN apk update && \
    apk add --no-cache git && \
    apk add --no-cache upx=3.95-r1
    apk add --no-cache upx=3.95-r2

RUN go get -u github.com/dosco/esc && \
    go get -u github.com/pilu/fresh
RUN go get -u github.com/shanzi/wu && \
    go install github.com/shanzi/wu && \
    go get github.com/GeertJohan/go.rice/rice

WORKDIR /app
COPY . /app
@@ -23,23 +24,25 @@ COPY --from=react-build /web/build/ ./web/build/
ENV GO111MODULE=on
RUN go mod vendor
RUN go generate ./... && \
    CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o service && \
    upx --ultra-brute -qq service && \
    upx -t service
    CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o super-graph && \
    echo "Compressing binary, will take a bit of time..." && \
    upx --ultra-brute -qq super-graph && \
    upx -t super-graph

# stage: 3
FROM alpine:latest
WORKDIR /app
WORKDIR /

RUN apk add --no-cache tzdata
RUN mkdir -p /config

COPY --from=go-build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=go-build /app/service .
COPY --from=go-build /app/config/*.yml ./
COPY --from=go-build /app/config/* /config/
COPY --from=go-build /app/super-graph .

RUN chmod +x /app/service
RUN chmod +x /super-graph
USER nobody

EXPOSE 8080

CMD ./service
CMD ./super-graph serv
38 README.md
@@ -1,38 +1,54 @@
# Super Graph
<a href="https://supergraph.dev"><img src="https://supergraph.dev/hologram.svg" width="100" height="100" align="right" /></a>

## Instant GraphQL API for Rails. Zero code.
# Super Graph - Build web products faster. Instant GraphQL APIs for your apps

Get a high-performance GraphQL API for your Rails app in seconds. Super Graph will auto-learn your database structure and relationships. Built-in support for Rails authentication and JWT tokens.



[](https://discord.gg/6pSWCTZ)


Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries.

## Why I built Super Graph?


I have a Rails app that gets a bit of traffic. While planning to improve the UI using React or Vue I found that my current APIs didn't have what we needed. I'd have to add more controllers and ensure they are providing the right amount of data. This required designing new APIs and making sure they match what the webdevs need. While this is all too common work, I was bored and there had to be a better way.
## The story of Super Graph?

All my Rails controllers were essentially wrappers around database queries and it's not exactly fun writing more of them.
After working on several products through my career I find that we spend way too much time on building API backends. Most APIs also require constant updating, which costs real time and money.

I always liked GraphQL; it made everything so simple. Web devs can use GraphQL to fetch exactly the data they need. There is one small issue however: you still have to write a lot of the same database code.
It's always the same thing: figure out what the UI needs, then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.

I wanted a GraphQL server that just worked the second you deployed it without having to write a line of code.
I didn't want to write this code anymore; I wanted the computer to do it. Enter GraphQL. To me it sounded great, but it still required me to write all the same database query code.

And so after a lot of coffee and some avocado toasts, Super Graph was born. An instant GraphQL API service that's high performance and easy to deploy. I hope you find it as useful as I do and there's a lot more coming so hit that :star: to stay in the loop.
Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?

This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, Rails integration, database migrations and everything else needed for you to build production-ready apps with it.

## Features

- Works with Rails database schemas
- Automatically learns schemas and relationships
- Belongs-To, One-To-Many and Many-To-Many table relationships
- Full text search and Aggregations
- Rails Auth supported (Redis, Memcache, Cookie)
- JWT tokens supported (Auth0, etc)
- Join with remote REST APIs
- Highly optimized and fast Postgres SQL queries
- Supports GraphQL queries and mutations
- Configure with a simple config file
- High performance Go codebase
- Tiny docker image and low memory requirements
- Database migrations tool
- Write database seeding scripts in JavaScript

## Documentation

[supergraph.dev](https://supergraph.dev)

## Contact me

[twitter.com/dosco](https://twitter.com/dosco)
[twitter/dosco](https://twitter.com/dosco)

[chat/super-graph](https://discord.gg/6pSWCTZ)

## License

7 bench/bench.sh Executable file
@@ -0,0 +1,7 @@
#!/bin/sh
if brew ls --versions wrk > /dev/null; then
  wrk -t12 -c400 -d30s --timeout 10s --script=query.lua --latency http://localhost:8080/api/v1/graphql
else
  brew install wrk
  wrk -t12 -c400 -d30s --timeout 10s --script=query.lua --latency http://localhost:8080/api/v1/graphql
fi
3 bench/query.lua Normal file
@@ -0,0 +1,3 @@
wrk.method = "POST"
wrk.headers["Content-Type"] = "application/json"
wrk.body = [[{"query":" { products( limit: 30, order_by: { price: desc }, distinct: [ price ] where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) { id name price user { id email } } } "}]]
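Since `bench.sh` passes `--script=query.lua` as a relative path, `wrk` looks for the Lua script in the current working directory. A minimal way to run the benchmark, assuming the Super Graph API is already serving on localhost:8080:

```bash
# run from inside the bench directory so wrk can find query.lua
cd bench
./bench.sh
```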
104 config/allow.list Normal file
@@ -0,0 +1,104 @@
# http://localhost:8080/

variables {
  "update": {
    "name": "Wu-Tang",
    "description": "No description needed"
  },
  "product_id": 1
}

mutation {
  products(id: $product_id, update: $update) {
    id
    name
    description
  }
}

variables {
  "data": {
    "email": "gfk@myspace.com",
    "full_name": "Ghostface Killah",
    "created_at": "now",
    "updated_at": "now"
  }
}

mutation {
  user(insert: $data) {
    id
  }
}

variables {
  "data": {
    "product_id": 5
  }
}

mutation {
  products(id: $product_id, delete: true) {
    id
    name
  }
}

query {
  users {
    id
    email
    picture: avatar
    products(limit: 2, where: {price: {gt: 10}}) {
      id
      name
      description
    }
  }
}

variables {
  "data": [
    {
      "name": "Protect Ya Neck",
      "created_at": "now",
      "updated_at": "now"
    },
    {
      "name": "Enter the Wu-Tang",
      "created_at": "now",
      "updated_at": "now"
    }
  ]
}

mutation {
  products(insert: $data) {
    id
    name
  }
}

query {
  me {
    id
    email
    full_name
  }
}

variables {
  "update": {
    "name": "Helloo",
    "description": "World \u003c\u003e"
  },
  "user": 123
}

mutation {
  products(id: 5, update: $update) {
    id
    name
    description
  }
}
@@ -1,13 +1,35 @@
title: Super Graph Development
app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
debug_level: 1
enable_tracing: true

# debug, info, warn, error, fatal, panic
log_level: "debug"

# Disable this in development to get a list of
# queries used. When enabled super graph
# will only allow queries from this list
# List saved to ./config/allow.list
use_allow_list: false

# Throw a 401 on auth failure for queries that need auth
# valid values: always, per_query, never
auth_fail_block: never

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true

# Watch the config folder and reload Super Graph
# with the new configs when a change is detected
reload_on_config_change: false

# File that points to the database seeding script
# seed_file: seed.js

# Path pointing to where the migrations can be found
migrations_path: ./config/migrations

# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@@ -42,10 +64,10 @@ auth:
  secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566

  # Remote cookie store. (memcache or redis)
  # url: redis://127.0.0.1:6379
  # password: test
  # max_idle: 80,
  # max_active: 12000,
  # url: redis://redis:6379
  # password: ""
  # max_idle: 80
  # max_active: 12000

  # In most cases you don't need these
  # salt: "encrypted cookie"
@@ -58,7 +80,6 @@ auth:
  # public_key_file: /secrets/public_key.pem
  # public_key_type: ecdsa #rsa


database:
  type: postgres
  host: db
@@ -66,20 +87,23 @@ database:
  dbname: app_development
  user: postgres
  password: ''

  #schema: "public"
  #pool_size: 10
  #max_retries: 0
  #log_level: "debug"

  # Define variables here that you want to use in filters
  # sub-queries must be wrapped in ()
  variables:
    account_id: "select account_id from users where id = $user_id"
    account_id: "(select account_id from users where id = $user_id)"

  # Define defaults for the field key and values below
  defaults:
    filter: ["{ user_id: { eq: $user_id } }"]
    # filter: ["{ user_id: { eq: $user_id } }"]

  # Fields and table names that you wish to block
  blacklist:
  # Field and table names that you wish to block
  blocklist:
    - ar_internal_metadata
    - schema_migrations
    - secret
@@ -87,23 +111,37 @@ database:
    - encrypted
    - token

  fields:
  tables:
    - name: users
      # This filter will overwrite defaults.filter
      filter: ["{ id: { eq: $user_id } }"]
      # filter: ["{ id: { eq: $user_id } }"]

    - name: products
      # Multiple filters are AND'd together
      filter: [
        "{ price: { gt: 0 } }",
        "{ price: { lt: 8 } }"
      ]
    # - name: products
    #   # Multiple filters are AND'd together
    #   filter: [
    #     "{ price: { gt: 0 } }",
    #     "{ price: { lt: 8 } }"
    #   ]

    - name: customers
      # No filter is used for this field, not
      # even defaults.filter
      filter: none

      remotes:
        - name: payments
          id: stripe_id
          url: http://rails_app:3000/stripe/$id
          path: data
          # debug: true
          pass_headers:
            - cookie
          set_headers:
            - name: Host
              value: 0.0.0.0
          # - name: Authorization
          #   value: Bearer <stripe_api_key>

    - # You can create new fields that have a
      # real db table backing them
      name: me
@@ -1,13 +1,31 @@
title: Super Graph Production
app_name: "Super Graph Production"
host_port: 0.0.0.0:8080
web_ui: false
debug_level: 0
enable_tracing: false

# debug, info, warn, error, fatal, panic, disable
log_level: "info"

# Disable this in development to get a list of
# queries used. When enabled super graph
# will only allow queries from this list
# List saved to ./config/allow.list
use_allow_list: true

# Throw a 401 on auth failure for queries that need auth
# valid values: always, per_query, never
auth_fail_block: always

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true

# File that points to the database seeding script
# seed_file: seed.js

# Path pointing to where the migrations can be found
# migrations_path: migrations

# Postgres related environment Variables
# SG_DATABASE_HOST
# SG_DATABASE_PORT
@@ -62,7 +80,7 @@ database:
  type: postgres
  host: db
  port: 5432
  dbname: app_development
  dbname: {{app_name_slug}}_development
  user: postgres
  password: ''
  #pool_size: 10
@@ -70,15 +88,16 @@ database:
  #log_level: "debug"

  # Define variables here that you want to use in filters
  # sub-queries must be wrapped in ()
  variables:
    account_id: "select account_id from users where id = $user_id"
    account_id: "(select account_id from users where id = $user_id)"

  # Define defaults for the field key and values below
  defaults:
    filter: ["{ user_id: { eq: $user_id } }"]

  # Fields and table names that you wish to block
  blacklist:
  # Field and table names that you wish to block
  blocklist:
    - ar_internal_metadata
    - schema_migrations
    - secret
@@ -86,7 +105,7 @@ database:
    - encrypted
    - token

  fields:
  tables:
    - name: users
      # This filter will overwrite defaults.filter
      filter: ["{ id: { eq: $user_id } }"]
@@ -103,6 +122,18 @@ database:
      # even defaults.filter
      filter: none

      # remotes:
      #   - name: payments
      #     id: stripe_id
      #     url: http://rails_app:3000/stripe/$id
      #     path: data
      #     # pass_headers:
      #     #   - cookie
      #     #   - host
      #     set_headers:
      #       - name: Authorization
      #         value: Bearer <stripe_api_key>

    - # You can create new fields that have a
      # real db table backing them
      name: me
114 config/seed.js Normal file
@@ -0,0 +1,114 @@
var user_count = 10
customer_count = 100
product_count = 50
purchase_count = 100

var users = []
customers = []
products = []

for (i = 0; i < user_count; i++) {
  var pwd = fake.password()
  var data = {
    full_name: fake.name(),
    avatar: fake.image_url(),
    phone: fake.phone(),
    email: fake.email(),
    password: pwd,
    password_confirmation: pwd,
    created_at: "now",
    updated_at: "now"
  }

  var res = graphql(" \
  mutation { \
    user(insert: $data) { \
      id \
    } \
  }", { data: data })

  users.push(res.user)
}

for (i = 0; i < product_count; i++) {
  var n = Math.floor(Math.random() * users.length)
  var user = users[n]

  var desc = [
    fake.beer_style(),
    fake.beer_hop(),
    fake.beer_yeast(),
    fake.beer_ibu(),
    fake.beer_alcohol(),
    fake.beer_blg(),
  ].join(", ")

  var data = {
    name: fake.beer_name(),
    description: desc,
    price: fake.price(),
    user_id: user.id,
    created_at: "now",
    updated_at: "now"
  }

  var res = graphql(" \
  mutation { \
    product(insert: $data) { \
      id \
    } \
  }", { data: data })
  products.push(res.product)
}

for (i = 0; i < customer_count; i++) {
  var pwd = fake.password()

  var data = {
    stripe_id: "CUS-" + fake.uuid(),
    full_name: fake.name(),
    phone: fake.phone(),
    email: fake.email(),
    password: pwd,
    password_confirmation: pwd,
    created_at: "now",
    updated_at: "now"
  }

  var res = graphql(" \
  mutation { \
    customer(insert: $data) { \
      id \
    } \
  }", { data: data })
  customers.push(res.customer)
}

for (i = 0; i < purchase_count; i++) {
  var sale_type = fake.rand_string(["rented", "bought"])

  if (sale_type === "rented") {
    var due_date = fake.date()
    var returned = fake.date()
  }

  var data = {
    customer_id: customers[Math.floor(Math.random() * customer_count)].id,
    product_id: products[Math.floor(Math.random() * product_count)].id,
    sale_type: sale_type,
    quantity: Math.floor(Math.random() * 10),
    due_date: due_date,
    returned: returned,
    created_at: "now",
    updated_at: "now"
  }

  var res = graphql(" \
  mutation { \
    purchase(insert: $data) { \
      id \
    } \
  }", { data: data })

  console.log(res)
}
21 corpus/1 Normal file
@@ -0,0 +1,21 @@
query {
  products(
    # returns only 30 items
    limit: 30,

    # starts from item 10, commented out for now
    # offset: 10,

    # orders the response items by highest price
    order_by: { price: desc },

    # no duplicate prices returned
    distinct: [ price ]

    # only items with an id >= 20 and < 28 are returned
    where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
    id
    name
    price
  }
}
14 corpus/2 Normal file
@@ -0,0 +1,14 @@
query {
  products(
    where: {
      or: {
        not: { id: { is_null: true } },
        price: { gt: 10 },
        price: { lt: 20 }
      } }
  ) {
    id
    name
    price
  }
}
12 corpus/3 Normal file
@@ -0,0 +1,12 @@
query {
  products(
    where: {
      and: {
        not: { id: { is_null: true } },
        price: { gt: 10 }
      }}) {
    id
    name
    price
  }
}
12 corpus/4 Normal file
@@ -0,0 +1,12 @@
query {
  products(
    where: {
      and: [
        { not: { id: { is_null: true } } },
        { price: { gt: 10 } },
      ] } ) {
    id
    name
    price
  }
}
6 corpus/5 Normal file
@@ -0,0 +1,6 @@
query {
  product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
    id
    name
  }
}
22 corpus/8 Normal file
@@ -0,0 +1,22 @@
query {
  products(
    # returns only 30 items
    limit: 30,

    # starts from item 10, commented out for now
    # offset: 10,

    # orders the response items by highest price
    order_by: { price: desc },

    # only items with an id >= 20 and < 28 are returned
    where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
    id
    name
    price
    user {
      full_name
      picture : avatar
    }
  }
}
13 demo Executable file
@@ -0,0 +1,13 @@
#!/bin/sh

if [ "$1" = "start" ]; then
  echo "Downloading pre-built docker images"
  docker-compose -f rails-app/demo.yml pull
  echo "Setting up and deploying Super Graph and the demo Rails app"
  docker-compose -f rails-app/demo.yml run rails_app rake db:create db:migrate db:seed
  docker-compose -f rails-app/demo.yml up
elif [ "$1" = "stop" ]; then
  docker-compose -f rails-app/demo.yml down
else
  echo "./demo [start|stop]"
fi
@@ -2,13 +2,25 @@ version: '3.4'
services:
  db:
    image: postgres
    ports:
      - "5432:5432"

  # nginx:
  #   image: nginx:latest
  #   volumes:
  #     - ./example/nginx.conf:/etc/nginx/nginx.conf
  # redis:
  #   image: redis:alpine
  #   command: ["redis-server", "--appendonly", "yes"]
  #   ports:
  #     - 3001:3001
  #     - "6379:6379"

  rails_app:
    build: rails-app/.
    command: bash -c "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
    volumes:
      - ./rails-app:/app
      - /app/tmp
    ports:
      - "3000:3000"
    depends_on:
      - db

  super_graph:
    build:
@@ -16,23 +28,15 @@ services:
      target: go-build
    environment:
      GO_ENV: "development"
    depends_on:
      - db
      PORT: 8080
    ports:
      - "8080:8080"
    volumes:
      - .:/app
    working_dir: /app
    command: fresh -c fresh.conf

  web:
    build: example/.
    command: bash -c "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
    volumes:
      - ./example:/app
    ports:
      - "3000:3000"
    command: wu -pattern="*.go" go run main.go serv
    depends_on:
      - db
      - super_graph
      # - nginx
      - rails_app

      # - redis
184 docs/.vuepress/components/HomeLayout.vue Normal file
@@ -0,0 +1,184 @@
<template>
  <div>
    <main aria-labelledby="main-title" >
      <Navbar />

      <div class="container mx-auto">
        <div class="flex flex-col md:flex-row justify-between px-10 md:px-20">
          <div class="bg-bottom bg-no-repeat bg-cover">
            <div class="text-center md:text-left pt-24">
              <h1 v-if="data.heroText !== null" class="text-5xl font-bold text-black pb-0 uppercase">
                {{ data.heroText || $title || 'Hello' }}
              </h1>

              <p class="text-2xl text-gray-700 leading-tight pb-0">
                {{ data.tagline || $description || 'Welcome to your VuePress site' }}
              </p>

              <p class="text-lg text-gray-600 leading-tight">
                {{ data.longTagline }}
              </p>

              <NavLink
                class="inline-block px-4 py-3 my-8 bg-blue-600 text-blue-100 font-bold rounded"
                :item="actionLink"
              />
            </div>
          </div>

          <div class="py-10 md:p-20">
            <img src="/hologram.svg" class="h-64">
          </div>

        </div>
      </div>

      <div>
        <div
          class="flex flex-wrap mx-2 md:mx-20"
          v-if="data.features && data.features.length"
        >
          <div
            class="w-2/4 md:w-1/3 shadow"
            v-for="(feature, index) in data.features"
            :key="index"
          >
            <div class="p-8">
              <h2 class="md:text-xl text-blue-800 font-medium border-0 mb-1">{{ feature.title }}</h2>
              <p class="md:text-xl text-gray-700 leading-snug">{{ feature.details }}</p>
            </div>
          </div>
        </div>
      </div>

      <div class="bg-gray-100 mt-10">
        <div class="container mx-auto px-10 md:px-0 py-32">
          <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
            What is {{ data.heroText }}?
          </h1>
          <div class="text-2xl md:text-3xl">
            Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secure GraphQL API. It comes with tools to create a new app and manage its database. You get it all: a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.
          </div>
        </div>
      </div>

      <div class="flex flex-wrap">
        <div class="md:w-2/4">
          <img src="/graphql.png">
        </div>

        <div class="md:w-2/4">
          <img src="/json.png">
        </div>
      </div>

      <div class="mt-10 py-10 md:py-20">
        <div class="container mx-auto px-10 md:px-0">
          <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
            How to use {{ data.heroText }}?
          </h1>
          <div class="text-2xl md:text-3xl">
            <small class="text-sm">Use the below command to download and install Super Graph. You will need Go 1.13 or above</small>
            <pre>‣ GO111MODULE=on go get -u github.com/dosco/super-graph</pre>

            <small class="text-sm">Create a new app and change to its directory</small>
            <pre>‣ super-graph new blog; cd blog</pre>

            <small class="text-sm">Set up the app database and seed it with fake data. Docker Compose will start a Postgres database for your app</small>
            <pre>‣ docker-compose run blog_api ./super-graph db:setup</pre>

            <small class="text-sm">And finally launch Super Graph configured for your app</small>
            <pre>‣ docker-compose up</pre>
          </div>
        </div>
      </div>

      <div class="bg-gray-100 mt-10">
        <div class="container mx-auto px-10 md:px-0 py-32">
          <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
            The story of {{ data.heroText }}
          </h1>
          <div class="text-2xl md:text-3xl">
            After working on several products through my career I find that we spend way too much time on building API backends. Most APIs also require constant updating, which costs real time and money.<br><br>

            It's always the same thing: figure out what the UI needs, then build an endpoint for it. Most API code involves struggling with an ORM to query a database and mangle the data into a shape that the UI expects to see.<br><br>

            I didn't want to write this code anymore; I wanted the computer to do it. Enter GraphQL. To me it sounded great, but it still required me to write all the same database query code.<br><br>

            Having worked with compilers before, I saw this as a compiler problem. Why not build a compiler that converts GraphQL to highly efficient SQL?<br><br>

            This compiler is what sits at the heart of Super Graph, with layers of useful functionality around it like authentication, remote joins, Rails integration, database migrations and everything else needed for you to build production-ready apps with it.
          </div>
        </div>
      </div>

      <div class="overflow-hidden bg-indigo-900" style="height: 730px">
        <div class="container mx-auto py-20">
          <img src="/super-graph-web-ui.png">
        </div>
      </div>

      <div class="py-10 md:py-20">
        <div class="container mx-auto px-10 md:px-0">
          <h1 class="uppercase font-semibold text-xl text-blue-800 mb-4">
            Try it with a demo Rails app
          </h1>
          <div class="text-2xl md:text-3xl">
            <small class="text-sm">Download the Docker compose config for the demo</small>
            <pre>‣ curl -L -o demo.yml https://bit.ly/2mq05lW</pre>

            <small class="text-sm">Set up the demo database</small>
            <pre>‣ docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed</pre>

            <small class="text-sm">Run the demo</small>
            <pre>‣ docker-compose -f demo.yml up</pre>

            <small class="text-sm">Sign in to the demo app (user1@demo.com / 123456)</small>
            <pre>‣ open http://localhost:3000</pre>

            <small class="text-sm">Try the Super Graph web UI</small>
            <pre>‣ open http://localhost:8080</pre>
          </div>
        </div>
      </div>

      <div class="border-t py-10">
        <div class="container mx-auto">
          <style>.embed-container { position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden; max-width: 100%; } .embed-container iframe, .embed-container object, .embed-container embed { position: absolute; top: 0; left: 0; width: 100%; height: 100%; }</style>
          <div class="embed-container shadow">
            <iframe src='https://www.youtube.com/embed/MfPL2A-DAJk' frameborder='0' allowfullscreen></iframe>
          </div>
        </div>
      </div>

      <div
        class="mx-auto text-center py-8"
        v-if="data.footer"
      >
        {{ data.footer }}
      </div>
    </main>
  </div>
</template>

<script>
import NavLink from '@theme/components/NavLink.vue'
import Navbar from '@theme/components/Navbar.vue'

export default {
  components: { NavLink, Navbar },

  computed: {
    data () {
      return this.$page.frontmatter
    },

    actionLink () {
      return {
        link: this.data.actionLink,
        text: this.data.actionText
      }
    }
  }
}
</script>
@@ -3,11 +3,30 @@ module.exports = {
  description: 'Get an instant GraphQL API for your Rails apps.',

  themeConfig: {
    logo: '/hero.png',
    logo: '/hologram.svg',
    nav: [
      { text: 'Guide', link: '/guide' },
      { text: 'Docs', link: '/guide' },
      { text: 'Deploy', link: '/deploy' },
      { text: 'Github', link: 'https://github.com/dosco/super-graph' },
      { text: 'Docker', link: 'https://hub.docker.com/r/dosco/super-graph/builds' },
      { text: 'Join Chat', link: 'https://discord.gg/NKdXBc' },

    ],
    serviceWorker: {
      updatePopup: true
    },
  },

  postcss: {
    plugins: [
      require('postcss-import'),
      require('tailwindcss'),
      require('postcss-nested'),
      require('autoprefixer')
    ]
  },

  plugins: [
    '@vuepress/plugin-nprogress',
  ]
}
}
1 docs/.vuepress/public/camp.svg Normal file (After: 21 KiB)
BIN docs/.vuepress/public/graphql.png Normal file (After: 66 KiB)
(Before: 2.5 KiB)
1 docs/.vuepress/public/hologram.svg Normal file (After: 7.4 KiB)
BIN docs/.vuepress/public/json.png Normal file (After: 149 KiB)
23 docs/.vuepress/public/logo.svg Normal file
@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="61px" height="60px" viewBox="0 0 61 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
  <!-- Generator: Sketch 46.2 (44496) - http://www.bohemiancoding.com/sketch -->
  <title>Desktop</title>
  <desc>Created with Sketch.</desc>
  <defs></defs>
  <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
    <g id="Desktop" transform="translate(-290.000000, -536.000000)" stroke="#1D4573">
      <g id="Super-logo" transform="translate(292.000000, 538.000000)">
        <circle id="Big-circle" stroke-width="5" fill="#FFFFFF" cx="29.5" cy="28.5" r="26.5"></circle>
        <circle id="Small-circle-1" stroke-width="3" fill="#FFFFFF" cx="36" cy="3" r="3"></circle>
        <circle id="Small-circle-2" stroke-width="3" fill="#FFFFFF" cx="54" cy="19" r="3"></circle>
        <circle id="Small-circle-3" stroke-width="3" fill="#FFFFFF" cx="50" cy="44" r="3"></circle>
        <circle id="Small-circle-4" stroke-width="3" fill="#FFFFFF" cx="10" cy="47" r="3"></circle>
        <circle id="Small-circle-5" stroke-width="3" fill="#FFFFFF" cx="3" cy="25" r="3"></circle>
        <path d="M7.5,25.5 L46.6885246,40.6944444" id="Line-1" stroke-width="3" stroke-linecap="square"></path>
        <path d="M13.3055556,42.6864407 L34.6944444,6.31355932" id="Line-2" stroke-width="3" stroke-linecap="square"></path>
        <path d="M49.6885246,21.3108108 L12.3114754,43.6891892" id="Line-3" stroke-width="3" stroke-linecap="square"></path>
        <path d="M36.3,6.31578947 L47.7,41.6842105" id="Line-4" stroke-width="3" stroke-linecap="square"></path>
      </g>
    </g>
  </g>
</svg>
(After: 1.8 KiB)
BIN docs/.vuepress/public/super-graph-web-ui-half.png Normal file (After: 146 KiB)
BIN docs/.vuepress/public/tracing.png Normal file (After: 42 KiB)
25 docs/.vuepress/styles/index.styl Normal file
@@ -0,0 +1,25 @@
@tailwind base;

@css {
  h1 {
    @apply font-semibold text-3xl border-0 py-4
  }
  h2, h3, h4 {
    @apply font-semibold text-2xl border-0 py-4
  }
  p {
    @apply py-2 text-lg
  }
  pre {
    white-space: pre-wrap;       /* css-3 */
    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
    white-space: -pre-wrap;      /* Opera 4-6 */
    white-space: -o-pre-wrap;    /* Opera 7 */
    word-wrap: break-word;       /* Internet Explorer 5.5+ */
    font-size: 90%;
  }
}

@tailwind components;

@tailwind utilities;

6 docs/.vuepress/styles/palette.styl Normal file
@@ -0,0 +1,6 @@
// showing default values
$accentColor = #3eaf7c
$textColor = #2c3e50
$borderColor = #eaecef
$codeBgColor = #282c34
$nprogressColor = #4fd1c5
@@ -1,67 +1,30 @@
---
layout: HomeLayout

home: true
heroImage: /hero.png
heroText: "SUPER GRAPH"
tagline: Get an instant GraphQL API for your Rails apps.
actionText: Get Started →
heroText: "Super Graph"
heroImage: /super-graph-web-ui-half.png
heroImageMobile: /super-graph-web-ui.png
tagline: Build web products faster. Instant APIs for your apps
longTagline: Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries.
actionText: Get Started, Free, Open Source →
actionLink: /guide

description: Super Graph can automatically learn a Postgres database and instantly serve it as a fast and secure GraphQL API. It comes with tools to create a new app and manage its database. You get it all: a very productive developer and a highly scalable app backend. It's designed to work well on serverless platforms by Google, AWS, Microsoft, etc. The goal is to save you a ton of time and money so you can focus on your app's core value.

features:
- title: Simple
  details: Easy config file, quick to deploy, no code needed. It just works.
- title: High Performance
  details: Converts your GraphQL query into a fast SQL one.
- title: Written in GO
  details: Go is a language created at Google to build secure and fast web services.
  details: Compiles your GraphQL into a fast SQL query in realtime.
- title: Ruby-on-Rails
  details: Can read Rails cookies and supports Rails database conventions.
- title: Serverless
  details: Instant startup for scale-to-zero environments like Google Cloud Run, App Engine, AWS Lambda
- title: Go Lang
  details: Go is a language created at Google to build fast and secure web services.
- title: Free and Open Source
  details: Not a VC funded startup. Not even a startup, just good old open source code

footer: MIT Licensed | Copyright © 2018-present Vikram Rangnekar
---



Without writing a line of code get an instant high-performance GraphQL API for your Ruby-on-Rails app. Super Graph will automatically understand your app's database and expose a secure, fast and complete GraphQL API for it. Built-in support for Rails authentication and JWT tokens.

## Try it out

```bash
# setup the demo rails app database
docker-compose run web rake db:create db:migrate db:seed

# run the demo rails app and the Super Graph service
docker-compose -f docker-compose.image.yml up

# try out the GraphQL api with the web ui in your browser
open http://localhost:8080

```

## Try this GraphQL query

```graphql
query {
  users {
    id
    email
    picture : avatar
    products(limit: 2, where: { price: { gt: 10 } }) {
      id
      name
      description
    }
  }
}
```

## Why I built Super Graph?

I have a Rails app that gets a bit of traffic. While planning to improve the UI using React or Vue I found that my current APIs didn't have what we needed. I'd have to add more controllers and ensure they are providing the right amount of data. This required designing new APIs and making sure they match what the webdevs need. While this is all too common work, I was bored and there had to be a better way.

All my Rails controllers were essentially wrappers around database queries and it's not exactly fun writing more of them.

I always liked GraphQL; it made everything so simple. Web devs can use GraphQL to fetch exactly the data they need. There is one small issue however: you still have to write a lot of the same database code.

I wanted a GraphQL server that just worked the second you deployed it without having to write a line of code.

And so after a lot of coffee and some avocado toasts, Super Graph was born. An instant GraphQL API service that's high performance and easy to deploy. I hope you find it as useful as I do and there's a lot more coming so hit that :star: to stay in the loop.

## Say hello

[twitter.com/dosco](https://twitter.com/dosco)
145 docs/deploy.md Normal file
@@ -0,0 +1,145 @@
---
sidebar: auto
---

## How to deploy Super Graph

Since you're reading this you're probably considering deploying Super Graph. You're in luck: it's really easy and there are several ways to choose from. Keep in mind Super Graph can be used as a pre-built docker image or you can easily customize it and build your own docker image.

### Alongside an existing Rails app

Super Graph can read Rails session cookies, like those created by authentication gems (Devise or Warden). Based on how you've configured your Rails app, the cookie can be signed, encrypted, or both, and can include the user ID or just the ID of the session. If you have chosen to use Redis or Memcache as your session store then Super Graph can read the session cookie and then look up the user in the session store. In short, it works really well with almost all Rails apps.

For any of this to work Super Graph must be deployed in a way that makes the browser send the app's cookie to it along with the GraphQL query. That means Super Graph needs to be either on the same domain as your app or on a subdomain.

::: tip I need an example
Say your Rails app runs on `myrailsapp.com`; then Super Graph should be on the same domain or on a subdomain like `graphql.myrailsapp.com`. If you choose a subdomain then remember to read the [Deploy under a subdomain](#deploy-under-a-subdomain) section.
:::

### Custom Docker Image

You might find the need to customize the Super Graph config file to fit your app and then package it into a docker image. This is easy to do; below is an example `Dockerfile` to do exactly this. To build it: `docker build -t my-super-graph .`

```docker
FROM dosco/super-graph:latest
WORKDIR /app
COPY *.yml ./
```

### Deploy under a subdomain

For this to work you have to ensure that the option `:domain => :all` is added to your Rails app's `Application.config.session_store` config; this will cause your Rails app to create session cookies that can be shared with sub-domains. More info here [/sharing-a-devise-user-session-across-subdomains-with-rails](http://excid3.com/blog/sharing-a-devise-user-session-across-subdomains-with-rails-3/)

### With an NGINX loadbalancer

If your infrastructure is fronted by NGINX then it should be configured so that all requests to your GraphQL API path are proxied to Super Graph. In the example NGINX config below, all requests to the path `/api/v1/graphql` are routed to wherever you have Super Graph installed within your architecture. This example is derived from the config file example at [/microservices-nginx-gateway/nginx.conf](https://github.com/launchany/microservices-nginx-gateway/blob/master/nginx.conf)

::: tip NGINX with sub-domain
Yes, NGINX is very flexible and you can configure it to keep Super Graph on a subdomain instead of on the same top level domain. I'm sure a little Googling will get you some great example configs for that.
:::

```nginx
# Configuration for the server
server {

  # Running port
  listen 80;

  # Proxy the graphql api path to Super Graph
  location /api/v1/graphql {

    proxy_pass http://super-graph-service:8080;
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $server_name;

  }

  # Proxying all other paths to your Rails app
  location / {

    proxy_pass http://your-rails-app:3000;
    proxy_redirect off;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $server_name;

  }
}
```

### On Kubernetes

If your Rails app runs on Kubernetes then ensure you have an ingress config deployed that points the path to the service that you have deployed Super Graph under.

#### Ingress config

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: simple-rails-app
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: myrailsapp.com
    http:
      paths:
      - path: /api/v1/graphql
        backend:
          serviceName: graphql-service
          servicePort: 8080
      - path: /
        backend:
          serviceName: rails-app
          servicePort: 3000
```

#### Service and deployment config

```yaml
apiVersion: v1
kind: Service
metadata:
  name: graphql-service
  labels:
    run: super-graph
spec:
  ports:
  - port: 8080
    protocol: TCP
  selector:
    run: super-graph

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: super-graph
spec:
  selector:
    matchLabels:
      run: super-graph
  replicas: 2
  template:
    metadata:
      labels:
        run: super-graph
    spec:
      containers:
      - name: super-graph
        image: docker.io/dosco/super-graph:latest
        ports:
        - containerPort: 8080
```

### JWT tokens (Auth0, etc)

In that case deploy under a subdomain and configure this service to use JWT authentication. You will need the public key file or secret key. Ensure your web app passes the JWT token with every GQL request in the `Authorization` header as a `bearer` token.
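A minimal sketch of such a request, assuming the API is served under the same `/api/v1/graphql` path used elsewhere in these docs; the host, token value and query below are placeholders:

```bash
# hypothetical example: send the JWT as a bearer token in the
# Authorization header with every GraphQL request
curl 'https://graphql.myrailsapp.com/api/v1/graphql' \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <your_jwt_token>' \
  --data-binary '{"query":"{ products { name price } }"}'
```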
739 docs/guide.md
@@ -4,21 +4,53 @@ sidebar: auto

# Guide to Super Graph

Without writing a line of code get an instant high-performance GraphQL API for your Ruby-on-Rails app. Super Graph will automatically understand your app's database and expose a secure, fast and complete GraphQL API for it. Built-in support for Rails authentication and JWT tokens.
Get an instant high performance GraphQL API for Postgres. No code needed. GraphQL is automatically transformed into efficient database queries. Also designed to integrate with your Rails apps.


## Features

- Works with Rails database schemas
- Automatically learns schemas and relationships
- Belongs-To, One-To-Many and Many-To-Many table relationships
- Full text search and Aggregations
- Rails Auth supported (Redis, Memcache, Cookie)
- JWT tokens supported (Auth0, etc)
- Join with remote REST APIs
- Highly optimized and fast Postgres SQL queries
- Supports GraphQL queries and mutations
- Configure with a simple config file
- High performance Go codebase
- Tiny docker image and low memory requirements
- Database migrations tool
- Write database seeding scripts in JavaScript

We currently support the `query` action which is used for fetching data. Support for `mutation` and `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10
## Try the demo app

```bash
# download the Docker compose config for the demo
curl -L -o demo.yml https://bit.ly/2mq05lW

# setup the demo rails app & database and run it
docker-compose -f demo.yml run rails_app rake db:create db:migrate db:seed

# run the demo
docker-compose -f demo.yml up

# signin to the demo app (user1@demo.com / 123456)
open http://localhost:3000

# try the super graph web ui
open http://localhost:8080
```

::: warning DEMO REQUIREMENTS
This demo requires `docker`; you can either install it using `brew` or from the
docker website [https://docs.docker.com/docker-for-mac/install/](https://docs.docker.com/docker-for-mac/install/)
:::

#### Trying out GraphQL

We currently fully support queries and mutations. Support for `subscriptions` is work in progress. For example the below GraphQL query would fetch two products that belong to the current user where the price is greater than 10.

#### GQL Query

@@ -40,6 +72,27 @@ query {
}
```

In another example the below GraphQL mutation would insert a product into the database. The first part of the below example is the variable data and the second half is the GraphQL mutation. For mutations, data always has to be passed as a variable.

```json
{
  "data": {
    "name": "Art of Computer Programming",
    "description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
    "price": 30.5
  }
}
```

```graphql
mutation {
  product(insert: $data) {
    id
    name
  }
}
```

The above GraphQL query returns the JSON result below. It handles all
kinds of complexity without you having to write a line of code.

@@ -71,26 +124,6 @@ For example there is a while greater than `gt` and a limit clause on a child fie
}
```

The above command will download the latest docker image for Super Graph and use it to run an example that includes a Postgres DB and a simple Rails ecommerce store app.

If you want to build and run Super Graph from code then the below commands will build the web ui and launch Super Graph in developer mode with a watcher to rebuild on code changes.

```bash

# yarn is needed to build the web ui
brew install yarn

# yarn install dependencies and build the web ui
(cd web && yarn install && yarn build)

# generate some stuff the go code needs
go generate ./...

# start super graph in development mode with a change watcher
docker-compose up

```

#### Try with an authenticated user

In development mode you can use the `X-User-ID: 4` header to set a user id so you don't have to worry about cookies, etc. This can be set using the *HTTP Headers* tab at the bottom of the web UI you'll see when you visit the above link. You can also directly run queries from the command line like below.

@@ -104,6 +137,325 @@ curl 'http://localhost:8080/api/v1/graphql' \
  -H 'content-type: application/json' \
  -H 'X-User-ID: 5' \
  --data-binary '{"query":"{ products { name price users { email }}}"}'
```

## Get Started

Super Graph can generate your initial app for you. The generated app will have config files, database migrations and seed files, among other things like docker related files.

You can then add your database schema to the migrations, maybe create some seed data using the seed script, and launch Super Graph. You're now good to go and can start working on your UI frontend in React, Vue or whatever.

```bash
# use the below command to download and install Super Graph. You will need Go 1.13 or above
GO111MODULE=on go get -u github.com/dosco/super-graph

# create a new app and change to its directory
super-graph new blog; cd blog

# setup the app database and seed it with fake data. Docker compose will start a Postgres database for your app
docker-compose run blog_api ./super-graph db:setup

# and finally launch Super Graph configured for your app
docker-compose up
```

Let's take a look at the files generated by Super Graph when you create a new app

```bash
super-graph new blog

> created 'blog'
> created 'blog/Dockerfile'
> created 'blog/docker-compose.yml'
> created 'blog/config'
> created 'blog/config/dev.yml'
> created 'blog/config/prod.yml'
> created 'blog/config/seed.js'
> created 'blog/config/migrations'
> created 'blog/config/migrations/100_init.sql'
> app 'blog' initialized
```

### Docker files

Docker Compose is a great way to run multiple services while developing on your desktop or laptop. In our case we need Postgres and Super Graph to both be running, and the `docker-compose.yml` is configured to do just that. The Super Graph service is named after your app, postfixed with `_api`. The Dockerfile can be used to build a container of your app for production deployment.

```bash
docker-compose run blog_api ./super-graph help
```

### Config files

All the config files needed to configure Super Graph for your app are contained in this folder; for starters you have two, `dev.yml` and `prod.yml`. When the `GO_ENV` environment variable is set to `development` then `dev.yml` is used, and the prod one when it's set to `production`. Stage and Test are the other two environment options, but you can set `GO_ENV` to whatever you like (eg. `alpha-test`) and Super Graph will look for a yaml file with that name to load config from.

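A quick sketch of how this selection works in practice (the `serv` sub-command follows the Dockerfile in this changeset; paths assume you run from the app folder):

```bash
# GO_ENV picks the config file by name
GO_ENV=development ./super-graph serv   # loads config/dev.yml
GO_ENV=production ./super-graph serv    # loads config/prod.yml
```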
### Seed.js
|
||||
|
||||
Having data flowing through your API makes building your frontend UI so much easier. When creafting say a user profile wouldn't it be nice for the API to return a fake user with name, picture and all. This is why having the ability to seed your database is important. Seeding cn also be used in production to setup some initial users like the admins or to add an initial set of products to a ecommerce store.
|
||||
|
||||
Super Graph makes this easy by allowing you to write your seeding script in plan old Javascript. The below file that auto-generated for new apps uses our built-in functions `fake` and `graphql` to generate fake data and use GraphQL mutations to insert it into the database.
|
||||
|
||||
```javascript
|
||||
// Example script to seed database
|
||||
|
||||
var users = [];
|
||||
|
||||
for (i = 0; i < 10; i++) {
|
||||
var data = {
|
||||
full_name: fake.name(),
|
||||
email: fake.email()
|
||||
}
|
||||
|
||||
var res = graphql(" \
|
||||
mutation { \
|
||||
user(insert: $data) { \
|
||||
id \
|
||||
} \
|
||||
}", { data: data })
|
||||
|
||||
users.push(res.user)
|
||||
}
|
||||
```

You can generate the following fake data for your seeding purposes. Below is the list of fake data functions supported by the built-in fake data library. For example, `fake.image_url()` will generate a fake image url, `fake.shuffle_strings(['hello', 'world', 'cool'])` will generate a randomly shuffled version of that array of strings, and `fake.rand_string(['hello', 'world', 'cool'])` will return a random string from the array provided.

```
// Person
person
name
name_prefix
name_suffix
first_name
last_name
gender
ssn
contact
email
phone
phone_formatted
username
password

// Address
address
city
country
country_abr
state
state_abr
status_code
street
street_name
street_number
street_prefix
street_suffix
zip
latitude
latitude_in_range
longitude
longitude_in_range

// Beer
beer_alcohol
beer_hop
beer_ibu
beer_blg
beer_malt
beer_name
beer_style
beer_yeast

// Cars
vehicle
vehicle_type
car_maker
car_model
fuel_type
transmission_gear_type

// Text
word
sentence
paragrph
question
quote

// Misc
generate
boolean
uuid

// Colors
color
hex_color
rgb_color
safe_color

// Internet
url
image_url
domain_name
domain_suffix
ipv4_address
ipv6_address
simple_status_code
http_method
user_agent
user_agent_firefox
user_agent_chrome
user_agent_opera
user_agent_safari

// Date / Time
date
date_range
nano_second
second
minute
hour
month
day
weekday
year
timezone
timezone_abv
timezone_full
timezone_offset

// Payment
price
credit_card
credit_card_cvv
credit_card_number
credit_card_number_luhn
credit_card_type
currency
currency_long
currency_short

// Company
bs
buzzword
company
company_suffix
job
job_description
job_level
job_title

// Hacker
hacker_abbreviation
hacker_adjective
hacker_ingverb
hacker_noun
hacker_phrase
hacker_verb

// Hipster
hipster_word
hipster_paragraph
hipster_sentence

// File
extension
mine_type

// Numbers
number
numerify
int8
int16
int32
int64
uint8
uint16
uint32
uint64
float32
float32_range
float64
float64_range
shuffle_ints
mac_address

// String
digit
letter
lexify
rand_string
shuffle_strings
numerify
```
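
To tie this together, here is a minimal sketch of how several of these generators can be combined in a seed script. It follows the same pattern as the seed script shown above; the `avatar_url` and `job_title` columns are hypothetical fields assumed for illustration:

```javascript
// A hypothetical seed sketch using more of the fake data functions.
// `fake` and `graphql` are the built-in seed script functions; the
// avatar_url and job_title columns are assumptions for illustration.
for (var i = 0; i < 5; i++) {
  var data = {
    full_name: fake.name(),
    email: fake.email(),
    avatar_url: fake.image_url(),
    job_title: fake.job_title()
  }

  graphql(" \
  mutation { \
    user(insert: $data) { \
      id \
    } \
  }", { data: data })
}
```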

### Migrations

Easy database migrations are one of the most important things when building products backed by a relational database. We make it super easy to manage and migrate your database.

```bash
super-graph db:new create_users
> created migration 'config/migrations/101_create_users.sql'
```

Migrations in Super Graph are plain old Postgres SQL. Here's an example for the above migration.

```sql
-- Write your migrate up statements here

CREATE TABLE public.users (
  id bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
  full_name text,
  email text UNIQUE NOT NULL CHECK (length(email) < 255),
  created_at timestamptz NOT NULL DEFAULT NOW(),
  updated_at timestamptz NOT NULL DEFAULT NOW()
);

---- create above / drop below ----

-- Write your down migrate statements here. If this migration is irreversible
-- then delete the separator line above.

DROP TABLE public.users;
```

We would encourage you to leverage triggers to maintain the consistency of your data. For example, here are a couple of triggers that you can add to your init migration and use across your tables.

```sql
-- This trigger script will set the updated_at column every time a row is updated
CREATE OR REPLACE FUNCTION trigger_set_updated_at()
RETURNS TRIGGER SET SCHEMA 'public' LANGUAGE 'plpgsql' AS $$
BEGIN
  new.updated_at = now();
  RETURN new;
END;
$$;

...

-- An example of adding this trigger to the users table
CREATE TRIGGER set_updated_at BEFORE UPDATE ON public.users
  FOR EACH ROW EXECUTE PROCEDURE trigger_set_updated_at();
```

```sql
-- This trigger script will set the user_id column to the current
-- Super Graph user.id value every time a row is created or updated
CREATE OR REPLACE FUNCTION trigger_set_user_id()
RETURNS TRIGGER SET SCHEMA 'public' LANGUAGE 'plpgsql' AS $$
BEGIN
  IF TG_OP = 'UPDATE' THEN
    new.user_id = old.user_id;
  ELSE
    new.user_id = current_setting('user.id')::int;
  END IF;

  RETURN new;
END;
$$;

...

-- An example of adding this trigger to the blog_posts table
CREATE TRIGGER set_user_id BEFORE INSERT OR UPDATE ON public.blog_posts
  FOR EACH ROW EXECUTE PROCEDURE trigger_set_user_id();
```

@ -139,13 +491,13 @@ Postgres also supports full text search using a TSV index. Super Graph makes it

```graphql
query {
  products(search "amazing") {
  products(search: "ale") {
    name
  }
}
```

### Complex queries (Where)
### Advanced queries

Super Graph supports complex queries where you can add filters, ordering, offsets and limits to the query.

@ -182,7 +534,7 @@ contains | column: { contains: [1, 2, 4] } | Is this array/json column a subset
contained_in | column: { contained_in: "{'a':1, 'b':2}" } | Is this array/json column contained within this value
is_null | column: { is_null: true } | Is column value null or not
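
As a rough illustration of how a filter from this table is used in practice, here is a client-side sketch using the `is_null` operator (the `products` table and the endpoint path follow the examples used elsewhere in this document):

```javascript
// Query products whose price is set, using a `where` filter whose
// shape follows the operator table above.
var req = {
  query: '{ products(where: { price: { is_null: false } }) { id name price } }',
  variables: {}
}

fetch('http://localhost:8080/api/v1/graphql', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(req),
})
  .then(res => res.json())
  .then(res => console.log(res.data));
```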

### Aggregation (Max, Count, etc)
### Aggregations

You will often find the need to fetch aggregated values from the database such as `count`, `max`, `min`, etc. This is simple to do with GraphQL; just prefix the aggregation name to the field name that you want to aggregate, like `count_id`. The below query will group products by name and find the minimum price for each group. Notice the `min_price` field: we're adding `min_` to `price`.

@ -234,6 +586,163 @@ query {
}
```
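
The grouping query described above is elided in this diff; as a sketch reconstructed from that description, the request would look roughly like this:

```javascript
// Selecting `name` together with the aggregated `min_price` groups
// products by name and returns the minimum price for each group.
var req = {
  query: '{ products { name min_price } }',
  variables: {}
}
```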

## Mutations

In GraphQL, mutations are the operation type for when you need to modify data. Super Graph supports the `insert`, `update` and `delete` database operations. Here are some examples.

When using mutations the data must be passed as variables, since Super Graph compiles the query into a prepared statement in the database for maximum speed. Prepared statements are like functions in your code: when called, they accept arguments, and your variables are passed in as those arguments.
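
As a sketch of what that looks like from the client (the endpoint path matches the fetch example later in this document):

```javascript
// The mutation references $data; the actual values travel separately
// as variables and become the prepared statement's arguments.
var req = {
  query: 'mutation { product(insert: $data) { id name } }',
  variables: { data: { name: "Art of Computer Programming", price: 30.5 } }
}

fetch('http://localhost:8080/api/v1/graphql', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(req),
})
  .then(res => res.json())
  .then(res => console.log(res.data));
```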

### Insert

```json
{
  "data": {
    "name": "Art of Computer Programming",
    "description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
    "price": 30.5
  }
}
```

```graphql
mutation {
  product(insert: $data) {
    id
    name
  }
}
```

### Bulk insert

```json
{
  "data": [{
    "name": "Art of Computer Programming",
    "description": "The Art of Computer Programming (TAOCP) is a comprehensive monograph written by computer scientist Donald Knuth",
    "price": 30.5
  },
  {
    "name": "Compilers: Principles, Techniques, and Tools",
    "description": "Known to professors, students, and developers worldwide as the 'Dragon Book' is available in a new edition",
    "price": 93.74
  }]
}
```

```graphql
mutation {
  product(insert: $data) {
    id
    name
  }
}
```

### Update

```json
{
  "data": {
    "price": 200.0
  },
  "product_id": 5
}
```

```graphql
mutation {
  product(update: $data, id: $product_id) {
    id
    name
  }
}
```

### Bulk update

```json
{
  "data": {
    "price": 500.0
  },
  "gt_product_id": 450.0,
  "lt_product_id": 550.0
}
```

```graphql
mutation {
  product(update: $data, where: {
      price: { gt: $gt_product_id, lt: $lt_product_id }
    }) {
    id
    name
  }
}
```

### Delete

```json
{
  "data": {
    "price": 500.0
  },
  "product_id": 5
}
```

```graphql
mutation {
  product(delete: true, id: $product_id) {
    id
    name
  }
}
```

### Bulk delete

```json
{
  "data": {
    "price": 500.0
  }
}
```

```graphql
mutation {
  product(delete: true, where: { price: { eq: 500.0 } }) {
    id
    name
  }
}
```

### Using variables

Variables (`$product_id`) and their values (`"product_id": 5`) can be passed alongside the GraphQL query. Using variables makes for better client side code as well as improved server side SQL query caching. The built-in web UI also supports setting variables. Not having to manipulate your GraphQL query string to insert values into it makes for cleaner and better client side code.

```javascript
// Define the request object keeping the query and the variables separate
var req = {
  query: '{ product(id: $product_id) { name } }' ,
  variables: { "product_id": 5 }
}

// Use the fetch api to make the query
fetch('http://localhost:8080/api/v1/graphql', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(req),
})
  .then(res => res.json())
  .then(res => console.log(res.data));
```

### Full text search

Every app these days needs search. Enough said; this often means reaching for something heavy like Solr. While this will work, why add complexity to your infrastructure when Postgres has really great

@ -319,9 +828,86 @@ class AddSearchColumn < ActiveRecord::Migration[5.1]
end
```

## Remote Joins

It often happens that after fetching some data from the DB we need to call another API to fetch some more data, with all of this combined into a single JSON response. For example, along with a list of users you need their last 5 payments from Stripe. This requires you to query your DB for the users and Stripe for the payments. Super Graph handles all this for you; also, only the fields you requested from the Stripe API are returned.

::: tip Is this fast?
Super Graph is able to fetch remote data and merge it with the DB response in an efficient manner. Several optimizations such as parallel HTTP requests and a zero-allocation JSON merge algorithm make this very fast. All of this without you having to write a line of code.
:::

For example, you need to list the last 3 payments made by a user. You will first need to look up the user in the database and then call the Stripe API to fetch their last 3 payments. For this to work, your user table in the db must have a `customer_id` column that contains their Stripe customer ID.

Similarly, you could also fetch the user's last tweet, lead info from Salesforce or whatever else you need. It's fine to mix up several different `remote joins` into a single GraphQL query.

### Stripe API example

The configuration is self explanatory. A `payments` field has been added under the `customers` table. This field is added to the `remotes` subsection that defines fields associated with `customers` that are remote and not real database columns.

The `id` parameter maps a column from the `customers` table to the `$id` variable. In this case it maps `$id` to the `customer_id` column.

```yaml
tables:
  - name: customers
    remotes:
      - name: payments
        id: stripe_id
        url: http://rails_app:3000/stripe/$id
        path: data
        # debug: true
        # pass_headers:
        #   - cookie
        #   - host
        set_headers:
          - name: Authorization
            value: Bearer <stripe_api_key>
```

#### How do I make use of this?

Just include `payments` like you would any other GraphQL selector under the `customers` selector. Super Graph will call the configured API for you and stitch (merge) the JSON the API sends back with the JSON generated from the database query. GraphQL features like aliases and fields all work.

```graphql
query {
  customers {
    id
    email
    payments {
      customer_id
      amount
      billing_details
    }
  }
}
```

And voila, here is the result. You get all of this advanced and honestly complex querying capability without writing a single line of code.

```json
"data": {
  "customers": [
    {
      "id": 1,
      "email": "linseymertz@reilly.co",
      "payments": [
        {
          "customer_id": "cus_YCj3ndB5Mz",
          "amount": 100,
          "billing_details": {
            "address": "1 Infinity Drive",
            "zipcode": "94024"
          }
        },
      ...
```

Even tracing data is available in the Super Graph web UI if tracing is enabled in the config. By default it is enabled in development. Additionally, you can set `debug: true` there to enable http request / response dumping to help with debugging.

![Query Tracing](tracing.png "Super Graph Web UI Query Tracing")

## Authentication

You can only have one type of auth enabled. You can either pick Rails or JWT. Uncomment the one you use and leave the rest commented out.
You can only have one type of auth enabled. You can either pick Rails or JWT.

### Rails Auth (Devise / Warden)

@ -335,32 +921,40 @@ Super Graph can handle all these variations including the old and new session fo

```yaml
auth:
  type: rails_cookie
  type: rails
  cookie: _app_session

  rails_cookie:
    secret_key_base: caf335bfcfdb04e50db5bb0a4d67ab9...
  rails:
    # Rails version; this is used for reading the
    # various cookie formats.
    version: 5.2

    # Found in 'Rails.application.config.secret_key_base'
    secret_key_base: 0a248500a64c01184edb4d7ad3a805488f8097ac761b76aaa6c17c01dcb7af03a2f18ba61b2868134b9c7b79a122bc0dadff4367414a2d173297bfea92be5566

```

#### Memcache session store

```yaml
auth:
  type: rails_memcache
  type: rails
  cookie: _app_session

  rails_memcache:
    host: 127.0.0.1
  rails:
    # Memcache remote cookie store.
    url: memcache://127.0.0.1
```

#### Redis session store

```yaml
auth:
  type: rails_redis
  type: rails
  cookie: _app_session

  rails_redis:
  rails:
    # Redis remote cookie store
    url: redis://127.0.0.1:6379
    password: ""
    max_idle: 80
@ -372,10 +966,10 @@ auth:

```yaml
auth:
  type: jwt
  cookie: _app_session

  jwt:
    provider: auth0 #none
    # the two providers are 'auth0' and 'none'
    provider: auth0
    secret: abc335bfcfdb04e50db5bb0a4d67ab9
    public_key_file: /secrets/public_key.pem
    public_key_type: ecdsa #rsa

@ -396,15 +990,28 @@ Configuration files can either be in YAML or JSON their names are derived from t

We've tried to ensure that the config file is self documenting and easy to work with.

```yaml
title: Super Graph Development
app_name: "Super Graph Development"
host_port: 0.0.0.0:8080
web_ui: true
debug_level: 1
enable_tracing: false

# debug, info, warn, error, fatal, panic, disable
log_level: "info"

# Disable this in development to get a list of
# queries used. When enabled super graph
# will only allow queries from this list
# List saved to ./config/allow.list
use_allow_list: true

# Throw a 401 on auth failure for queries that need auth
# valid values: always, per_query, never
auth_fail_block: never
auth_fail_block: always

# Latency tracing for database queries and remote joins
# the resulting latency information is returned with the
# response
enable_tracing: true

# Postgres related environment Variables
# SG_DATABASE_HOST
@ -475,7 +1082,7 @@ database:
  defaults:
    filter: ["{ user_id: { eq: $user_id } }"]

  # Fields and table names that you wish to block
  # Field and table names that you wish to block
  blacklist:
    - ar_internal_metadata
    - schema_migrations
@ -484,7 +1091,7 @@ database:
    - encrypted
    - token

  fields:
  tables:
    - name: users
      # This filter will overwrite defaults.filter
      filter: ["{ id: { eq: $user_id } }"]
@ -501,6 +1108,18 @@ database:
      # even defaults.filter
      filter: none

      remotes:
        - name: payments
          id: stripe_id
          url: http://rails_app:3000/stripe/$id
          path: data
          # pass_headers:
          #   - cookie
          #   - host
          set_headers:
            - name: Authorization
              value: Bearer <stripe_api_key>

    - # You can create new fields that have a
      # real db table backing them
      name: me
@ -531,33 +1150,29 @@ SG_AUTH_RAILS_REDIS_PASSWORD
SG_AUTH_JWT_PUBLIC_KEY_FILE
```

## Deploying Super Graph
## Developing Super Graph

How do I deploy the Super Graph service with my existing rails app? You have several options here. Essentially you need to ensure your app's session cookie will be passed to this service.
If you want to build and run Super Graph from code then the below commands will build the web ui and launch Super Graph in developer mode with a watcher to rebuild on code changes. The demo rails app is also launched to make it easier to test changes.

### Custom Docker Image
```bash

Create a `Dockerfile` like the one below to roll your own
custom Super Graph docker image. And to build it `docker build -t my-super-graph .`
# yarn is needed to build the web ui
brew install yarn

# yarn install dependencies and build the web ui
(cd web && yarn install && yarn build)

# generate some stuff the go code needs
go generate ./...

# do this only the first time, to set up the database
docker-compose run rails_app rake db:create db:migrate db:seed

# start super graph in development mode with a change watcher
docker-compose up

```docker
FROM dosco/super-graph:latest
WORKDIR /app
COPY *.yml ./
```

### Deploy under a subdomain
For this to work you have to ensure that the option `:domain => :all` is added to your rails app config `Application.config.session_store`; this will cause your rails app to create session cookies that can be shared with sub-domains. More info here <http://excid3.com/blog/sharing-a-devise-user-session-across-subdomains-with-rails-3/>

### With an NGINX loadbalancer
I'm sure you know how to configure it so that the Super Graph endpoint path `/api/v1/graphql` is routed to wherever you have this service installed within your architecture.

### On Kubernetes
If your Rails app runs on Kubernetes then ensure you have an ingress config deployed that points the path to the service that you have deployed Super Graph under.

### JWT tokens (Auth0, etc)
In that case deploy under a subdomain and configure this service to use JWT authentication. You will need the public key file or secret key. Ensure your web app passes the JWT token with every GQL request in the Authorization header as a `bearer` token.
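
A rough sketch of what that looks like from the browser (the endpoint path follows the fetch examples earlier in this document; the token value is whatever JWT your provider issued):

```javascript
// Pass the JWT as a bearer token on every GraphQL request.
var token = 'eyJ...' // hypothetical JWT issued by Auth0 or your own provider

fetch('http://localhost:8080/api/v1/graphql', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + token
  },
  body: JSON.stringify({ query: '{ products { id name } }' }),
})
  .then(res => res.json())
  .then(res => console.log(res.data));
```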

## MIT License

MIT Licensed | Copyright © 2018-present Vikram Rangnekar

@ -4,7 +4,11 @@
    "build": "vuepress build"
  },
  "devDependencies": {
    "vuepress": "^0.14.10",
    "@silvanite/vuepress-plugin-tailwind": "^1.1.0",
    "autoprefixer": "^9.6.1",
    "postcss-import": "^12.0.1",
    "tailwindcss": "^1.0.6",
    "vuepress": "^1.0.0",
    "webpack-dev-middleware": "3.6.0"
  }
}
500
docs/tailwind.config.js
Normal file
@ -0,0 +1,500 @@
// tailwind.config.js
module.exports = {
  theme: {},
  variants: {},
  plugins: [],
}

/*
module.exports = {
  prefix: '',
  important: false,
  separator: ':',
  theme: {
    screens: {
      sm: '640px',
      md: '768px',
      lg: '1024px',
      xl: '1280px',
    },
    colors: {
      transparent: 'transparent',

      black: '#000',
      white: '#fff',

      gray: {
        100: '#f7fafc',
        200: '#edf2f7',
        300: '#e2e8f0',
        400: '#cbd5e0',
        500: '#a0aec0',
        600: '#718096',
        700: '#4a5568',
        800: '#2d3748',
        900: '#1a202c',
      },
      red: {
        100: '#fff5f5',
        200: '#fed7d7',
        300: '#feb2b2',
        400: '#fc8181',
        500: '#f56565',
        600: '#e53e3e',
        700: '#c53030',
        800: '#9b2c2c',
        900: '#742a2a',
      },
      orange: {
        100: '#fffaf0',
        200: '#feebc8',
        300: '#fbd38d',
        400: '#f6ad55',
        500: '#ed8936',
        600: '#dd6b20',
        700: '#c05621',
        800: '#9c4221',
        900: '#7b341e',
      },
      yellow: {
        100: '#fffff0',
        200: '#fefcbf',
        300: '#faf089',
        400: '#f6e05e',
        500: '#ecc94b',
        600: '#d69e2e',
        700: '#b7791f',
        800: '#975a16',
        900: '#744210',
      },
      green: {
        100: '#f0fff4',
        200: '#c6f6d5',
        300: '#9ae6b4',
        400: '#68d391',
        500: '#48bb78',
        600: '#38a169',
        700: '#2f855a',
        800: '#276749',
        900: '#22543d',
      },
      teal: {
        100: '#e6fffa',
        200: '#b2f5ea',
        300: '#81e6d9',
        400: '#4fd1c5',
        500: '#38b2ac',
        600: '#319795',
        700: '#2c7a7b',
        800: '#285e61',
        900: '#234e52',
      },
      blue: {
        100: '#ebf8ff',
        200: '#bee3f8',
        300: '#90cdf4',
        400: '#63b3ed',
        500: '#4299e1',
        600: '#3182ce',
        700: '#2b6cb0',
        800: '#2c5282',
        900: '#2a4365',
      },
      indigo: {
        100: '#ebf4ff',
        200: '#c3dafe',
        300: '#a3bffa',
        400: '#7f9cf5',
        500: '#667eea',
        600: '#5a67d8',
        700: '#4c51bf',
        800: '#434190',
        900: '#3c366b',
      },
      purple: {
        100: '#faf5ff',
        200: '#e9d8fd',
        300: '#d6bcfa',
        400: '#b794f4',
        500: '#9f7aea',
        600: '#805ad5',
        700: '#6b46c1',
        800: '#553c9a',
        900: '#44337a',
      },
      pink: {
        100: '#fff5f7',
        200: '#fed7e2',
        300: '#fbb6ce',
        400: '#f687b3',
        500: '#ed64a6',
        600: '#d53f8c',
        700: '#b83280',
        800: '#97266d',
        900: '#702459',
      },
    },
    spacing: {
      px: '1px',
      '0': '0',
      '1': '0.25rem',
      '2': '0.5rem',
      '3': '0.75rem',
      '4': '1rem',
      '5': '1.25rem',
      '6': '1.5rem',
      '8': '2rem',
      '10': '2.5rem',
      '12': '3rem',
      '16': '4rem',
      '20': '5rem',
      '24': '6rem',
      '32': '8rem',
      '40': '10rem',
      '48': '12rem',
      '56': '14rem',
      '64': '16rem',
    },
    backgroundColor: theme => theme('colors'),
    backgroundPosition: {
      bottom: 'bottom',
      center: 'center',
      left: 'left',
      'left-bottom': 'left bottom',
      'left-top': 'left top',
      right: 'right',
      'right-bottom': 'right bottom',
      'right-top': 'right top',
      top: 'top',
    },
    backgroundSize: {
      auto: 'auto',
      cover: 'cover',
      contain: 'contain',
    },
    borderColor: theme => ({
      ...theme('colors'),
      default: theme('colors.gray.300', 'currentColor'),
    }),
    borderRadius: {
      none: '0',
      sm: '0.125rem',
      default: '0.25rem',
      lg: '0.5rem',
      full: '9999px',
    },
    borderWidth: {
      default: '1px',
      '0': '0',
      '2': '2px',
      '4': '4px',
      '8': '8px',
    },
    boxShadow: {
      default: '0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06)',
      md: '0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)',
      lg: '0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)',
      xl: '0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04)',
      '2xl': '0 25px 50px -12px rgba(0, 0, 0, 0.25)',
      inner: 'inset 0 2px 4px 0 rgba(0, 0, 0, 0.06)',
      outline: '0 0 0 3px rgba(66, 153, 225, 0.5)',
      none: 'none',
    },
    container: {
      center: true
    },
    cursor: {
      auto: 'auto',
      default: 'default',
      pointer: 'pointer',
      wait: 'wait',
      text: 'text',
      move: 'move',
      'not-allowed': 'not-allowed',
    },
    fill: {
      current: 'currentColor',
    },
    flex: {
      '1': '1 1 0%',
      auto: '1 1 auto',
      initial: '0 1 auto',
      none: 'none',
    },
    flexGrow: {
      '0': '0',
      default: '1',
    },
    flexShrink: {
      '0': '0',
      default: '1',
    },
    fontFamily: {
      sans: [
        '-apple-system',
        'BlinkMacSystemFont',
        '"Segoe UI"',
        'Roboto',
        '"Helvetica Neue"',
        'Arial',
        '"Noto Sans"',
        'sans-serif',
        '"Apple Color Emoji"',
        '"Segoe UI Emoji"',
        '"Segoe UI Symbol"',
        '"Noto Color Emoji"',
      ],
      serif: [
        'Georgia',
        'Cambria',
        '"Times New Roman"',
        'Times',
        'serif',
      ],
      mono: [
        'Menlo',
        'Monaco',
        'Consolas',
        '"Liberation Mono"',
        '"Courier New"',
        'monospace',
      ],
    },
    fontSize: {
      xs: '0.75rem',
      sm: '0.875rem',
      base: '1rem',
      lg: '1.125rem',
      xl: '1.25rem',
      '2xl': '1.5rem',
      '3xl': '1.875rem',
      '4xl': '2.25rem',
      '5xl': '3rem',
      '6xl': '4rem',
    },
    fontWeight: {
      hairline: '100',
      thin: '200',
      light: '300',
      normal: '400',
      medium: '500',
      semibold: '600',
      bold: '700',
      extrabold: '800',
      black: '900',
    },
    height: theme => ({
      auto: 'auto',
      ...theme('spacing'),
      full: '100%',
      screen: '100vh',
    }),
    inset: {
      '0': '0',
      auto: 'auto',
    },
    letterSpacing: {
      tighter: '-0.05em',
      tight: '-0.025em',
      normal: '0',
      wide: '0.025em',
      wider: '0.05em',
      widest: '0.1em',
    },
    lineHeight: {
      none: '1',
      tight: '1.25',
      snug: '1.375',
      normal: '1.5',
      relaxed: '1.625',
      loose: '2',
    },
    listStyleType: {
      none: 'none',
      disc: 'disc',
      decimal: 'decimal',
    },
    margin: (theme, { negative }) => ({
      auto: 'auto',
      ...theme('spacing'),
      ...negative(theme('spacing')),
    }),
    maxHeight: {
      full: '100%',
      screen: '100vh',
    },
    maxWidth: {
      xs: '20rem',
      sm: '24rem',
      md: '28rem',
      lg: '32rem',
      xl: '36rem',
      '2xl': '42rem',
      '3xl': '48rem',
      '4xl': '56rem',
      '5xl': '64rem',
      '6xl': '72rem',
      full: '100%',
    },
    minHeight: {
      '0': '0',
      full: '100%',
      screen: '100vh',
    },
    minWidth: {
      '0': '0',
      full: '100%',
    },
    objectPosition: {
      bottom: 'bottom',
      center: 'center',
      left: 'left',
      'left-bottom': 'left bottom',
      'left-top': 'left top',
      right: 'right',
      'right-bottom': 'right bottom',
      'right-top': 'right top',
      top: 'top',
    },
    opacity: {
      '0': '0',
      '25': '0.25',
      '50': '0.5',
      '75': '0.75',
      '100': '1',
    },
    order: {
      first: '-9999',
      last: '9999',
      none: '0',
      '1': '1',
      '2': '2',
      '3': '3',
      '4': '4',
      '5': '5',
      '6': '6',
      '7': '7',
      '8': '8',
      '9': '9',
      '10': '10',
      '11': '11',
      '12': '12',
    },
    padding: theme => theme('spacing'),
    stroke: {
      current: 'currentColor',
    },
    textColor: theme => theme('colors'),
    width: theme => ({
      auto: 'auto',
      ...theme('spacing'),
      '1/2': '50%',
      '1/3': '33.333333%',
      '2/3': '66.666667%',
      '1/4': '25%',
      '2/4': '50%',
      '3/4': '75%',
      '1/5': '20%',
      '2/5': '40%',
      '3/5': '60%',
      '4/5': '80%',
      '1/6': '16.666667%',
      '2/6': '33.333333%',
      '3/6': '50%',
      '4/6': '66.666667%',
      '5/6': '83.333333%',
      '1/12': '8.333333%',
      '2/12': '16.666667%',
      '3/12': '25%',
      '4/12': '33.333333%',
      '5/12': '41.666667%',
      '6/12': '50%',
      '7/12': '58.333333%',
      '8/12': '66.666667%',
      '9/12': '75%',
      '10/12': '83.333333%',
      '11/12': '91.666667%',
      full: '100%',
      screen: '100vw',
    }),
    zIndex: {
      auto: 'auto',
      '0': '0',
      '10': '10',
      '20': '20',
      '30': '30',
      '40': '40',
      '50': '50',
    },
  },
  variants: {
    accessibility: ['responsive', 'focus'],
    alignContent: ['responsive'],
    alignItems: ['responsive'],
    alignSelf: ['responsive'],
    appearance: ['responsive'],
    backgroundAttachment: ['responsive'],
    backgroundColor: ['responsive', 'hover', 'focus'],
    backgroundPosition: ['responsive'],
    backgroundRepeat: ['responsive'],
    backgroundSize: ['responsive'],
    borderCollapse: ['responsive'],
    borderColor: ['responsive', 'hover', 'focus'],
    borderRadius: ['responsive'],
    borderStyle: ['responsive'],
    borderWidth: ['responsive'],
    boxShadow: ['responsive', 'hover', 'focus'],
    cursor: ['responsive'],
    display: ['responsive'],
    fill: ['responsive'],
    flex: ['responsive'],
    flexDirection: ['responsive'],
    flexGrow: ['responsive'],
    flexShrink: ['responsive'],
    flexWrap: ['responsive'],
    float: ['responsive'],
    fontFamily: ['responsive'],
    fontSize: ['responsive'],
    fontSmoothing: ['responsive'],
    fontStyle: ['responsive'],
    fontWeight: ['responsive', 'hover', 'focus'],
    height: ['responsive'],
    inset: ['responsive'],
    justifyContent: ['responsive'],
    letterSpacing: ['responsive'],
    lineHeight: ['responsive'],
    listStylePosition: ['responsive'],
    listStyleType: ['responsive'],
    margin: ['responsive'],
    maxHeight: ['responsive'],
    maxWidth: ['responsive'],
    minHeight: ['responsive'],
    minWidth: ['responsive'],
    objectFit: ['responsive'],
    objectPosition: ['responsive'],
    opacity: ['responsive'],
    order: ['responsive'],
    outline: ['responsive', 'focus'],
    overflow: ['responsive'],
    padding: ['responsive'],
    pointerEvents: ['responsive'],
    position: ['responsive'],
    resize: ['responsive'],
    stroke: ['responsive'],
    tableLayout: ['responsive'],
    textAlign: ['responsive'],
    textColor: ['responsive', 'hover', 'focus'],
    textDecoration: ['responsive', 'hover', 'focus'],
    textTransform: ['responsive'],
    userSelect: ['responsive'],
    verticalAlign: ['responsive'],
    visibility: ['responsive'],
    whitespace: ['responsive'],
    width: ['responsive'],
    wordBreak: ['responsive'],
    zIndex: ['responsive'],
  },
  corePlugins: {},
  plugins: [],
}
*/
7761
docs/yarn.lock
@ -1,24 +0,0 @@

# README

This README would normally document whatever steps are necessary to get the
application up and running.

Things you may want to cover:

* Ruby version

* System dependencies

* Configuration

* Database creation

* Database initialization

* How to run the test suite

* Services (job queues, cache servers, search engines, etc.)

* Deployment instructions

* ...
@ -1,25 +0,0 @@

events { }

http {
  server {
    server_name localhost;
    listen 3001;

    location /api/v1/graphql {
      proxy_pass http://super_graph:8080;
      proxy_redirect off;
      #rewrite ^/api/v1/graphql$ $1 break;
    }

    location /super-graph {
      proxy_pass http://super_graph:8080;
      proxy_redirect off;
      rewrite ^/super-graph/(.*)$ /$1 break;
    }

    location / {
      proxy_pass http://web:3000;
      proxy_redirect off;
    }
  }
}
14
fresh.conf
@ -1,14 +0,0 @@

root: .
tmp_path: ./tmp
build_name: runner-build
build_log: runner-build-errors.log
valid_ext: .go, .tpl, .tmpl, .html, .yml
no_rebuild_ext: .tpl, .tmpl, .html
ignored: web, tmp, vendor, example, docs
build_delay: 600
colors: 1
log_color_main: cyan
log_color_build: yellow
log_color_runner: green
log_color_watcher: magenta
log_color_app:
30
fuzzbuzz.yaml
Normal file
@ -0,0 +1,30 @@

base: ubuntu:16.04
targets:

  - name: qcode
    language: go
    version: "1.11"
    corpus: ./corpus
    memory_limit: "100" # in megabytes
    timeout: "500" # in milliseconds
    harness:
      function: FuzzerEntrypoint
      # package defines where to import FuzzerEntrypoint from
      package: github.com/dosco/super-graph/qcode
      # the repository will be cloned to
      # $GOPATH/src/github.com/fuzzbuzz/tutorial
      checkout: github.com/dosco/super-graph

  - name: jsn
    language: go
    version: "1.11"
    corpus: ./corpus
    memory_limit: "100" # in megabytes
    timeout: "500" # in milliseconds
    harness:
      function: FuzzerEntrypoint
      # package defines where to import FuzzerEntrypoint from
      package: github.com/dosco/super-graph/jsn
      # the repository will be cloned to
      # $GOPATH/src/github.com/fuzzbuzz/tutorial
      checkout: github.com/dosco/super-graph
46
go.mod
@ -1,20 +1,42 @@

module github.com/dosco/super-graph

require (
	github.com/Masterminds/semver v1.4.2
	github.com/GeertJohan/go.rice v1.0.0
	github.com/Masterminds/semver v1.5.0
	github.com/OneOfOne/xxhash v1.2.5 // indirect
	github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3
	github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737
	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
	github.com/brianvoe/gofakeit v3.18.0+incompatible
	github.com/cespare/xxhash/v2 v2.1.0
	github.com/daaku/go.zipexe v1.0.1 // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible
	github.com/dlclark/regexp2 v1.2.0 // indirect
	github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733
	github.com/fsnotify/fsnotify v1.4.7
	github.com/garyburd/redigo v1.6.0

	github.com/go-pg/pg v8.0.1+incompatible
	github.com/gobuffalo/flect v0.1.1
	github.com/gorilla/websocket v1.4.0
	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
	github.com/sirupsen/logrus v1.4.0
	github.com/spf13/viper v1.3.1
	github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
	github.com/gobuffalo/flect v0.1.6
	github.com/gorilla/websocket v1.4.1
	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
	github.com/jackc/pgconn v1.0.1
	github.com/jackc/pgtype v1.0.1
	github.com/jackc/pgx v3.6.0+incompatible
	github.com/jackc/pgx/v4 v4.0.1
	github.com/jackc/tern v1.8.2
	github.com/magiconair/properties v1.8.1 // indirect
	github.com/pelletier/go-toml v1.4.0 // indirect
	github.com/pkg/errors v0.8.1
	github.com/rs/zerolog v1.15.0
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.2.2 // indirect
	github.com/spf13/cobra v0.0.5
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/spf13/viper v1.4.0
	github.com/valyala/fasttemplate v1.0.1
	golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9
	golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a
	mellium.im/sasl v0.2.1 // indirect
	golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
	golang.org/x/sys v0.0.0-20190927073244-c990c680b611 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
)

go 1.13
294
go.sum
@ -1,98 +1,336 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3 h1:+qz9Ga6l6lKw6fgvk5RMV5HQznSLvI8Zxajwdj4FhFg=
github.com/adjust/gorails v0.0.0-20171013043634-2786ed0c03d3/go.mod h1:FlkD11RtgMTYjVuBnb7cxoHmQGqvPpCsr2atC88nl/M=
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8=
github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.0 h1:Eb1IiuHmi3FhT12NKfqCQXSXRqc4NTMvgJoREemrSt4=
github.com/cespare/xxhash/v2 v2.0.0/go.mod h1:MaMeaVDXZNmTpkOyhVs3/WfjgobkbQgfrVnrr3DyZL0=
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=
github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
github.com/daaku/go.zipexe v1.0.1 h1:wV4zMsDOI2SZ2m7Tdz1Ps96Zrx+TzaK15VbUaGozw0M=
github.com/daaku/go.zipexe v1.0.1/go.mod h1:5xWogtqlYnfBXkSB1o9xysukNP9GTvaNkqzUZbt3Bw8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733 h1:cyNc40Dx5YNEO94idePU8rhVd3dn+sd04Arh0kDBAaw=
github.com/dop251/goja v0.0.0-20190912223329-aa89e6a4c733/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/go-pg/pg v8.0.1+incompatible h1:gi93AxXmqlFGT0os5z2kTnbDqCk6BHXnA9MMApVxAkY=
github.com/go-pg/pg v8.0.1+incompatible/go.mod h1:a2oXow+aFOrvwcKs3eIA0lNFmMilrxK2sOkB5NWe0vA=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.1.1 h1:GTZJjJufv9FxgRs1+0Soo3wj+Md3kTUmTER/YE4uINA=
github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/gobuffalo/flect v0.1.6 h1:D7KWNRFiCknJKA495/e1BO7oxqf8tbieaLv/ehoZ/+g=
github.com/gobuffalo/flect v0.1.6/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k=
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0 h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
github.com/jackc/pgconn v1.0.1 h1:ZANo4pIkeHKIVD1cQMcxu8fwrwIICLblzi9HCjooZeQ=
github.com/jackc/pgconn v1.0.1/go.mod h1:GgY/Lbj1VonNaVdNUHs9AwWom3yP2eymFQ1C8z9r/Lk=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.0 h1:FApgMJ/GtaXfI0s8Lvd0kaLaRwMOhs4VH92pwkwQQvU=
github.com/jackc/pgproto3/v2 v2.0.0/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.0.1 h1:7GWB9n3DdnO3TIbj59wMAE9QcHPL4cy/Bbtk5P1Noow=
github.com/jackc/pgtype v1.0.1/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
github.com/jackc/pgx v0.0.0-20180217033919-55ca9db5d578/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.0.1 h1:NNrG0MX2AVEJw1NNDYg+ixSXycCfWWKeqMuQHQkAngc=
github.com/jackc/pgx/v4 v4.0.1/go.mod h1:NeQ64VJooukJGFLX2r01sJL/gRbKlpvsO2giBvjfgrY=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.0.0 h1:rbjAshlgKscNa7j0jAM0uNQflis5o2XUogPMVAwtcsM=
github.com/jackc/puddle v1.0.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/tern v1.8.2 h1:+d9eK83fRS0dbf6nt+2tjILYF4FKG1O5xTFB8Lzc66U=
github.com/jackc/tern v1.8.2/go.mod h1:AMppp2oyCT6rYnJHLLMmPWwahfFvdIVi6mr9gH81Nxs=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pkg/errors v0.0.0-20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.0-20160114030619-9c9300901990/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20151218134703-7f60f83a2c81/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.1 h1:5+8j8FTpnFV4nEImW/ofkzEt8VoOiLXxdYIDsB73T38=
|
||||
github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
|
||||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20151201002508-7b85b097bf75/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
|
||||
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190927073244-c990c680b611 h1:q9u40nxWT5zRClI/uU9dHCiYGottAg6Nzz4YUQyHxdA=
|
||||
golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w=
|
||||
mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
39
jsn/README.md
Normal file
@ -0,0 +1,39 @@
# JSN - Fast low allocation JSON library

## Design

This library is designed as a set of separate functions to extract data from and
mutate JSON. All functions are focused on keeping allocations to a minimum and on
being as fast as possible. The functions don't validate the JSON; a separate
`Validate` function does that.

The JSON parsing algorithm processes each object `{}` or array `[]` bottom up:
only once the end of an array or object is found are the keys within it parsed
from the top down.

```
{"id":1,"posts": [{"title":"PT1-1","description":"PD1-1"}], "full_name":"FN1","email":"E1" }

id: 1

posts: [{"title":"PT1-1","description":"PD1-1"}]

[{"title":"PT1-1","description":"PD1-1"}]

{"title":"PT1-1","description":"PD1-1"}

title: "PT1-1"

description: "PD1-1"

full_name: "FN1"

email: "E1"
```

## Functions

- Strip: Strip a path from the root to a child node and return the rest
- Replace: Replace values by key
- Get: Get the values of all matching keys
- Filter: Extract specific keys from an object
- Tree: Fetch unique keys from an array or object

A minimal usage sketch follows below.
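As a rough sketch of how these functions fit together (the import path `github.com/dosco/super-graph/jsn` is an assumption based on this repository; adjust it to wherever the package actually lives):

```
package main

import (
	"fmt"

	"github.com/dosco/super-graph/jsn" // assumed import path
)

func main() {
	json := []byte(`{"data": {"users": [{"id": 1, "email": "user0@demo.com"}]}}`)

	// Get walks the whole document and returns every field whose key
	// matches one of the given keys, regardless of nesting depth.
	fields := jsn.Get(json, [][]byte{[]byte("email")})

	for _, f := range fields {
		// Value unquotes string values and returns numbers, booleans
		// and null as raw bytes; it returns nil for objects and arrays.
		fmt.Printf("%s = %s\n", f.Key, jsn.Value(f.Value)) // email = user0@demo.com
	}
}
```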
97
jsn/corpus/001.json
Normal file
@ -0,0 +1,97 @@
{
  "data": {
    "test": { "__twitter_id": "ABCD" },
    "users": [
      {
        "id": 1,
        "full_name": "Sidney Stroman",
        "email": "user0@demo.com",
        "__twitter_id": "2048666903444506956",
        "embed": {
          "id": 8,
          "full_name": "Caroll Orn Sr.",
          "email": "joannarau@hegmann.io",
          "__twitter_id": "ABC123"
          "more": [{
            "__twitter_id": "more123",
            "hello: "world
          }]
        }
      },
      {
        "id": 2,
        "full_name": "Jerry Dickinson",
        "email": "user1@demo.com",
        "__twitter_id": [{ "name": "hello" }, { "name": "world"}]
      },
      {
        "id": 3,
        "full_name": "Kenna Cassin",
        "email": "user2@demo.com",
        "__twitter_id": { "name": "hello", "address": { "work": "1 infinity loop" } }
      },
      {
        "id": 4,
        "full_name": "Mr. Pat Parisian",
        "email": "__twitter_id",
        "__twitter_id": 1234567890
      },
      {
        "id": 5,
        "full_name": "Bette Ebert",
        "email": "janeenrath@goyette.com",
        "__twitter_id": 1.23E
      },
      {
        "id": 6,
        "full_name": "Everett Kiehn",
        "email": "michael@bartoletti.com",
        "__twitter_id": true
      },
      {
        "id": 7,
        "full_name": "Katrina Cronin",
        "email": "loretaklocko@framivolkman.org",
        "__twitter_id": false
      },
      {
        "id": 8,
        "full_name": "Caroll Orn Sr.",
        "email": "joannarau@hegmann.io",
        "__twitter_id": "2048666903444506956"
      },
      {
        "id": 9,
        "full_name": "Gwendolyn Ziemann",
        "email": "renaytoy@rutherford.co",
        "__twitter_id": ["hello", "world"]
      },
      {
        "id": 10,
        "full_name": "Mrs. Rosann Fritsch",
        "email": "holliemosciski@thiel.org",
        "__twitter_id": "2048666903444506956"
      },
      {
        "id": 11,
        "full_name": "Arden Koss",
        "email": "cristobalankunding@howewelch.org",
        "__twitter_id": "2048666903444506956",
        "something": null
      },
      {
        "id": 12,
        "full_name": "Brenton Bauch PhD",
        "email": "renee@miller.co",
        "__twitter_id": 1
      },
      {
        "id": 13,
        "full_name": "Daine Gleichner",
        "email": "andrea@gmail.com",
        "__twitter_id": "",
        "id__twitter_id": "NOOO",
        "work_email": "andrea@nienow.co"
      }
    ]}
}
168
jsn/filter.go
Normal file
@ -0,0 +1,168 @@
package jsn

import (
	"bytes"

	"github.com/cespare/xxhash/v2"
)

// Filter writes a copy of the JSON document in b to w, keeping only the
// fields whose keys are listed in keys.
func Filter(w *bytes.Buffer, b []byte, keys []string) error {
	var err error
	kmap := make(map[uint64]struct{}, len(keys))

	for i := range keys {
		kmap[xxhash.Sum64String(keys[i])] = struct{}{}
	}

	// is the input a list
	isList := false

	// list item
	item := 0

	// field in an object
	field := 0

	s, e, d := 0, 0, 0

	var k []byte
	state := expectKey

	for i := 0; i < len(b); i++ {
		if state == expectObjClose || state == expectListClose {
			switch b[i] {
			case '{', '[':
				d++
			case '}', ']':
				d--
			}
		}

		if state == expectKey {
			switch b[i] {
			case '[':
				if !isList {
					err = w.WriteByte('[')
				}
				isList = true
			case '{':
				if item == 0 {
					err = w.WriteByte('{')
				} else {
					_, err = w.Write([]byte("},{"))
				}
				field = 0
				item++
			}
			if err != nil {
				return err
			}
		}

		switch {
		case state == expectKey && b[i] == '"':
			state = expectKeyClose
			s = i

		case state == expectKeyClose && b[i] == '"':
			state = expectColon
			k = b[(s + 1):i]

		case state == expectColon && b[i] == ':':
			state = expectValue

		case state == expectValue && b[i] == '"':
			state = expectString

		case state == expectString && b[i] == '"':
			e = i

		case state == expectValue && b[i] == '[':
			state = expectListClose
			d++

		case state == expectListClose && d == 0 && b[i] == ']':
			e = i

		case state == expectValue && b[i] == '{':
			state = expectObjClose
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			e = i

		case state == expectValue && (b[i] >= '0' && b[i] <= '9'):
			state = expectNumClose

		case state == expectNumClose &&
			((b[i] < '0' || b[i] > '9') &&
				(b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
			i--
			e = i

		case state == expectValue &&
			(b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
			state = expectBoolClose

		case state == expectBoolClose && (b[i] == 'e' || b[i] == 'E'):
			e = i

		case state == expectValue && b[i] == 'n':
			state = expectNull

		case state == expectNull && b[i] == 'l':
			e = i
		}

		if e != 0 {
			state = expectKey
			cb := b[s:(e + 1)]
			e = 0

			if _, ok := kmap[xxhash.Sum64(k)]; !ok {
				continue
			}

			if field != 0 {
				if err := w.WriteByte(','); err != nil {
					return err
				}
			}

			// copy the key:value span, stripping newlines and tabs
			sk := 0
			for i := 0; i < len(cb); i++ {
				if cb[i] == '\n' || cb[i] == '\t' {
					if _, err := w.Write(cb[sk:i]); err != nil {
						return err
					}
					sk = i + 1
				}
			}

			if sk > 0 && sk < len(cb) {
				_, err = w.Write(cb[sk:])
			} else {
				_, err = w.Write(cb)
			}

			if err != nil {
				return err
			}
			field++
		}
	}

	if item != 0 {
		if err := w.WriteByte('}'); err != nil {
			return err
		}
	}

	if isList {
		if err := w.WriteByte(']'); err != nil {
			return err
		}
	}

	return nil
}
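A quick sketch of how `Filter` is meant to be called (hypothetical input values; the `jsn` import path is assumed as above):

```
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/dosco/super-graph/jsn" // assumed import path
)

func main() {
	var buf bytes.Buffer

	in := []byte(`[{"id":1,"full_name":"Ann","email":"a@b.co","secret":"x"}]`)

	// Keep only "id" and "full_name"; everything else is dropped.
	if err := jsn.Filter(&buf, in, []string{"id", "full_name"}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(buf.String()) // [{"id":1,"full_name":"Ann"}]
}
```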
35
jsn/fuzz.go
Normal file
@ -0,0 +1,35 @@
package jsn

import "bytes"

// FuzzerEntryPoint is the entry point used by Fuzzbuzz.
func FuzzerEntryPoint(data []byte) int {
	err1 := Validate(string(data))

	var b1 bytes.Buffer
	err2 := Filter(&b1, data, []string{"id", "full_name", "embed"})

	path1 := [][]byte{[]byte("data"), []byte("users")}
	Strip(data, path1)

	from := []Field{
		{[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
		{[]byte("__twitter_id"), []byte(`"ABC123"`)},
	}

	to := []Field{
		{[]byte("__twitter_id"), []byte(`"1234567890"`)},
		{[]byte("some_list"), []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)},
	}

	var b2 bytes.Buffer
	err3 := Replace(&b2, data, from, to)

	Keys(data)

	if err1 != nil || err2 != nil || err3 != nil {
		return -1
	}

	return 0
}
140
jsn/get.go
Normal file
@ -0,0 +1,140 @@
package jsn

import (
	"github.com/cespare/xxhash/v2"
)

const (
	expectKey int = iota
	expectKeyClose
	expectColon
	expectValue
	expectString
	expectNull
	expectListClose
	expectObjClose
	expectBoolClose
	expectNumClose
)

type Field struct {
	Key   []byte
	Value []byte
}

// Value returns the unquoted bytes of a string value, the raw bytes of a
// number, boolean or null, and nil for objects and arrays.
func Value(b []byte) []byte {
	e := (len(b) - 1)
	switch {
	case b[0] == '"' && b[e] == '"':
		return b[1:(len(b) - 1)]
	case b[0] == '[' && b[e] == ']':
		return nil
	case b[0] == '{' && b[e] == '}':
		return nil
	default:
		return b
	}
}

// Get returns every field in b, at any nesting depth, whose key matches
// one of the given keys.
func Get(b []byte, keys [][]byte) []Field {
	kmap := make(map[uint64]struct{}, len(keys))

	for i := range keys {
		kmap[xxhash.Sum64(keys[i])] = struct{}{}
	}

	res := make([]Field, 0, 20)

	s, e, d := 0, 0, 0

	var k []byte
	state := expectKey

	n := 0
	for i := 0; i < len(b); i++ {
		if state == expectObjClose || state == expectListClose {
			switch b[i] {
			case '{', '[':
				d++
			case '}', ']':
				d--
			}
		}

		switch {
		case state == expectKey && b[i] == '"':
			state = expectKeyClose
			s = i

		case state == expectKeyClose && b[i] == '"':
			state = expectColon
			k = b[(s + 1):i]

		case state == expectColon && b[i] == ':':
			state = expectValue

		case state == expectValue && b[i] == '"':
			state = expectString
			s = i

		case state == expectString && b[i] == '"':
			e = i

		case state == expectValue && b[i] == '[':
			state = expectListClose
			s = i
			d++

		case state == expectListClose && d == 0 && b[i] == ']':
			e = i
			i = s

		case state == expectValue && b[i] == '{':
			state = expectObjClose
			s = i
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			e = i
			i = s

		case state == expectValue && (b[i] >= '0' && b[i] <= '9'):
			state = expectNumClose
			s = i

		case state == expectNumClose &&
			((b[i] < '0' || b[i] > '9') &&
				(b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
			i--
			e = i

		case state == expectValue &&
			(b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
			state = expectBoolClose
			s = i

		case state == expectBoolClose && (b[i] == 'e' || b[i] == 'E'):
			e = i

		case state == expectValue && b[i] == 'n':
			state = expectNull

		case state == expectNull && b[i] == 'l':
			e = i
		}

		if e != 0 {
			_, ok := kmap[xxhash.Sum64(k)]

			if ok {
				res = append(res, Field{k, b[s:(e + 1)]})
				n++
			}

			state = expectKey
			e = 0
		}
	}

	return res[:n]
}
478
jsn/json_test.go
Normal file
@ -0,0 +1,478 @@
package jsn

import (
	"bytes"
	"testing"
)

var (
	input1 = `
	{
	"data": {
	"test": { "__twitter_id": "ABCD" },
	"users": [
	{
		"id": 1,
		"full_name": "Sidney Stroman",
		"email": "user0@demo.com",
		"__twitter_id": "2048666903444506956",
		"embed": {
			"id": 8,
			"full_name": "Caroll Orn Sr.",
			"email": "joannarau@hegmann.io",
			"__twitter_id": "ABC123"
			"more": [{
				"__twitter_id": "more123",
				"hello: "world
			}]
		}
	},
	{
		"id": 2,
		"full_name": "Jerry Dickinson",
		"email": "user1@demo.com",
		"__twitter_id": [{ "name": "hello" }, { "name": "world"}]
	},
	{
		"id": 3,
		"full_name": "Kenna Cassin",
		"email": "user2@demo.com",
		"__twitter_id": { "name": "hello", "address": { "work": "1 infinity loop" } }
	},
	{
		"id": 4,
		"full_name": "Mr. Pat Parisian",
		"email": "__twitter_id",
		"__twitter_id": 1234567890
	},
	{
		"id": 5,
		"full_name": "Bette Ebert",
		"email": "janeenrath@goyette.com",
		"__twitter_id": 1.23E
	},
	{
		"id": 6,
		"full_name": "Everett Kiehn",
		"email": "michael@bartoletti.com",
		"__twitter_id": true
	},
	{
		"id": 7,
		"full_name": "Katrina Cronin",
		"email": "loretaklocko@framivolkman.org",
		"__twitter_id": false
	},
	{
		"id": 8,
		"full_name": "Caroll Orn Sr.",
		"email": "joannarau@hegmann.io",
		"__twitter_id": "2048666903444506956"
	},
	{
		"id": 9,
		"full_name": "Gwendolyn Ziemann",
		"email": "renaytoy@rutherford.co",
		"__twitter_id": ["hello", "world"]
	},
	{
		"id": 10,
		"full_name": "Mrs. Rosann Fritsch",
		"email": "holliemosciski@thiel.org",
		"__twitter_id": "2048666903444506956"
	},
	{
		"id": 11,
		"full_name": "Arden Koss",
		"email": "cristobalankunding@howewelch.org",
		"__twitter_id": "2048666903444506956",
		"something": null
	},
	{
		"id": 12,
		"full_name": "Brenton Bauch PhD",
		"email": "renee@miller.co",
		"__twitter_id": 1
	},
	{
		"id": 13,
		"full_name": "Daine Gleichner",
		"email": "andrea@gmail.com",
		"__twitter_id": "",
		"id__twitter_id": "NOOO",
		"work_email": "andrea@nienow.co"
	}
	]}
	}`

	input2 = `
	[{
		"id": 1,
		"full_name": "Sidney Stroman",
		"email": "user0@demo.com",
		"__twitter_id": "2048666903444506956",
		"something": null,
		"embed": {
			"id": 8,
			"full_name": "Caroll Orn Sr.",
			"email": "joannarau@hegmann.io",
			"__twitter_id": "ABC123"
		}
	},
	{
		"m": 1,
		"id": 2,
		"full_name": "Jerry Dickinson",
		"email": "user1@demo.com",
		"__twitter_id": [{ "name": "hello" }, { "name": "world"}]
	}]`

	input3 = `
	{
	"data": {
	"test": { "__twitter_id": "ABCD" },
	"users": [{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]
	}
	}`

	input4 = `
	{ "users" : [{
		"id": 1,
		"full_name": "Sidney Stroman",
		"email": "user0@demo.com",
		"__twitter_id": "2048666903444506956",
		"embed": {
			"id": 8,
			"full_name": null,
			"email": "joannarau@hegmann.io",
			"__twitter_id": "ABC123"
		}
	},
	{
		"m": 1,
		"id": 2,
		"full_name": "Jerry Dickinson",
		"email": "user1@demo.com",
		"__twitter_id": [{ "name": "hello" }, { "name": "world"}]
	}] }`
)

func TestGet(t *testing.T) {
	values := Get([]byte(input1), [][]byte{
		[]byte("__twitter_id"),
		[]byte("work_email"),
	})

	expected := []Field{
		{[]byte("__twitter_id"), []byte(`"ABCD"`)},
		{[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
		{[]byte("__twitter_id"), []byte(`"ABC123"`)},
		{[]byte("__twitter_id"), []byte(`"more123"`)},
		{[]byte("__twitter_id"),
			[]byte(`[{ "name": "hello" }, { "name": "world"}]`)},
		{[]byte("__twitter_id"),
			[]byte(`{ "name": "hello", "address": { "work": "1 infinity loop" } }`),
		},
		{[]byte("__twitter_id"), []byte(`1234567890`)},
		{[]byte("__twitter_id"), []byte(`1.23E`)},
		{[]byte("__twitter_id"), []byte(`true`)},
		{[]byte("__twitter_id"), []byte(`false`)},
		{[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
		{[]byte("__twitter_id"), []byte(`["hello", "world"]`)},
		{[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
		{[]byte("__twitter_id"), []byte(`"2048666903444506956"`)},
		{[]byte("__twitter_id"), []byte(`1`)},
		{[]byte("__twitter_id"), []byte(`""`)},
		{[]byte("work_email"), []byte(`"andrea@nienow.co"`)},
	}

	if len(values) != len(expected) {
		t.Fatal("len(values) != len(expected)")
	}

	for i := range expected {
		if bytes.Equal(values[i].Key, expected[i].Key) == false {
			t.Error(string(values[i].Key), " != ", string(expected[i].Key))
		}

		if bytes.Equal(values[i].Value, expected[i].Value) == false {
			t.Error(string(values[i].Value), " != ", string(expected[i].Value))
		}
	}
}

func TestValue(t *testing.T) {
	v1 := []byte("12345")
	if !bytes.Equal(Value(v1), v1) {
		t.Fatal("Number value invalid")
	}

	v2 := []byte(`"12345"`)
	if !bytes.Equal(Value(v2), []byte(`12345`)) {
		t.Fatal("String value invalid")
	}

	v3 := []byte(`{ "hello": "world" }`)
	if Value(v3) != nil {
		t.Fatal("Object value is not nil", Value(v3))
	}

	v4 := []byte(`[ "hello", "world" ]`)
	if Value(v4) != nil {
		t.Fatal("List value is not nil")
	}
}

func TestFilter1(t *testing.T) {
	var b bytes.Buffer
	Filter(&b, []byte(input2), []string{"id", "full_name", "embed"})

	expected := `[{"id": 1,"full_name": "Sidney Stroman","embed": {"id": 8,"full_name": "Caroll Orn Sr.","email": "joannarau@hegmann.io","__twitter_id": "ABC123"}},{"id": 2,"full_name": "Jerry Dickinson"}]`

	if b.String() != expected {
		t.Error("Does not match expected json")
	}
}

func TestFilter2(t *testing.T) {
	value := `[{"id":1,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":100,"amount_refunded":0,"date":"01/01/2019","application":null,"billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}, {"id":2,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":0,"date":"02/18/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}},{"id":3,"customer_id":"cus_2TbMGf3cl0","object":"charge","amount":150,"amount_refunded":50,"date":"03/21/2019","billing_details":{"address":"1 Infinity Drive","zipcode":"94024"}}]`

	var b bytes.Buffer
	Filter(&b, []byte(value), []string{"id"})

	expected := `[{"id":1},{"id":2},{"id":3}]`

	if b.String() != expected {
		t.Error("Does not match expected json")
	}
}

func TestStrip(t *testing.T) {
	path1 := [][]byte{[]byte("data"), []byte("users")}
	value1 := Strip([]byte(input3), path1)

	expected := []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)

	if bytes.Equal(value1, expected) == false {
		t.Log(value1)
		t.Error("[Valid path] Does not match expected json")
	}

	path2 := [][]byte{[]byte("boo"), []byte("hoo")}
	value2 := Strip([]byte(input3), path2)

	if bytes.Equal(value2, []byte(input3)) == false {
		t.Log(value2)
		t.Error("[Invalid path] Does not match expected json")
	}
}

func TestValidateTrue(t *testing.T) {
	json := []byte(`  [{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)

	err := Validate(string(json))
	if err != nil {
		t.Error(err)
	}
}

func TestValidateFalse(t *testing.T) {
	json := []byte(`  [{ "hello": 123"<html>}]`)

	err := Validate(string(json))
	if err == nil {
		t.Error("JSON validation failed to detect invalid json")
	}
}

func TestReplace(t *testing.T) {
	var buf bytes.Buffer

	from := []Field{
		{[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
		{[]byte("__twitter_id"), []byte(`"ABC123"`)},
	}

	to := []Field{
		{[]byte("__twitter_id"), []byte(`"1234567890"`)},
		{[]byte("some_list"), []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)},
	}

	expected := `{ "users" : [{
		"id": 1,
		"full_name": "Sidney Stroman",
		"email": "user0@demo.com",
		"__twitter_id": "2048666903444506956",
		"embed": {
			"id": 8,
			"full_name": null,
			"email": "joannarau@hegmann.io",
			"some_list":[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]
		}
	},
	{
		"m": 1,
		"id": 2,
		"full_name": "Jerry Dickinson",
		"email": "user1@demo.com",
		"__twitter_id":"1234567890"
	}] }`

	err := Replace(&buf, []byte(input4), from, to)
	if err != nil {
		t.Fatal(err)
	}

	if buf.String() != expected {
		t.Log(buf.String())
		t.Error("Does not match expected json")
	}
}

func TestReplaceEmpty(t *testing.T) {
	var buf bytes.Buffer

	json := `{ "users" : [{"id":1,"full_name":"Sidney Stroman","email":"user0@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":2,"full_name":"Jerry Dickinson","email":"user1@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":3,"full_name":"Kenna Cassin","email":"user2@demo.com","__users_twitter_id":"2048666903444506956"}, {"id":4,"full_name":"Mr. Pat Parisian","email":"rodney@kautzer.biz","__users_twitter_id":"2048666903444506956"}, {"id":5,"full_name":"Bette Ebert","email":"janeenrath@goyette.com","__users_twitter_id":"2048666903444506956"}, {"id":6,"full_name":"Everett Kiehn","email":"michael@bartoletti.com","__users_twitter_id":"2048666903444506956"}, {"id":7,"full_name":"Katrina Cronin","email":"loretaklocko@framivolkman.org","__users_twitter_id":"2048666903444506956"}, {"id":8,"full_name":"Caroll Orn Sr.","email":"joannarau@hegmann.io","__users_twitter_id":"2048666903444506956"}, {"id":9,"full_name":"Gwendolyn Ziemann","email":"renaytoy@rutherford.co","__users_twitter_id":"2048666903444506956"}, {"id":10,"full_name":"Mrs. Rosann Fritsch","email":"holliemosciski@thiel.org","__users_twitter_id":"2048666903444506956"}, {"id":11,"full_name":"Arden Koss","email":"cristobalankunding@howewelch.org","__users_twitter_id":"2048666903444506956"}, {"id":12,"full_name":"Brenton Bauch PhD","email":"renee@miller.co","__users_twitter_id":"2048666903444506956"}, {"id":13,"full_name":"Daine Gleichner","email":"andrea@nienow.co","__users_twitter_id":"2048666903444506956"}] }`

	err := Replace(&buf, []byte(json), []Field{}, []Field{})
	if err != nil {
		t.Fatal(err)
	}

	if buf.String() != json {
		t.Log(buf.String())
		t.Error("Does not match expected json")
	}
}

func TestKeys1(t *testing.T) {
	json := `[{"id":1,"posts": [{"title":"PT1-1","description":"PD1-1"}, {"title":"PT1-2","description":"PD1-2"}], "full_name":"FN1","email":"E1","books": [{"name":"BN1-1","description":"BD1-1"},{"name":"BN1-2","description":"BD1-2"},{"name":"BN1-2","description":"BD1-2"}]},{"id":1,"posts": [{"title":"PT1-1","description":"PD1-1"}, {"title":"PT1-2","description":"PD1-2"}], "full_name":"FN1","email":"E1","books": [{"name":"BN1-1","description":"BD1-1"},{"name":"BN1-2","description":"BD1-2"},{"name":"BN1-2","description":"BD1-2"}]},{"id":1,"posts": [{"title":"PT1-1","description":"PD1-1"}, {"title":"PT1-2","description":"PD1-2"}], "full_name":"FN1","email":"E1","books": [{"name":"BN1-1","description":"BD1-1"},{"name":"BN1-2","description":"BD1-2"},{"name":"BN1-2","description":"BD1-2"}]}]`

	fields := Keys([]byte(json))

	exp := []string{
		"id", "posts", "title", "description", "full_name", "email", "books", "name", "description",
	}

	if len(exp) != len(fields) {
		t.Errorf("Expected %d fields %d", len(exp), len(fields))
	}

	for i := range exp {
		if string(fields[i]) != exp[i] {
			t.Errorf("Expected field '%s' got '%s'", string(exp[i]), fields[i])
		}
	}
}

func TestKeys2(t *testing.T) {
	json := `{"id":1,"posts": [{"title":"PT1-1","description":"PD1-1"}, {"title":"PT1-2","description":"PD1-2"}], "full_name":"FN1","email":"E1","books": [{"name":"BN1-1","description":"BD1-1"},{"name":"BN1-2","description":"BD1-2"},{"name":"BN1-2","description":"BD1-2"}]}`

	fields := Keys([]byte(json))

	exp := []string{
		"id", "posts", "title", "description", "full_name", "email", "books", "name", "description",
	}

	// for i := range fields {
	// 	fmt.Println("-->", string(fields[i]))
	// }

	if len(exp) != len(fields) {
		t.Errorf("Expected %d fields %d", len(exp), len(fields))
	}

	for i := range exp {
		if string(fields[i]) != exp[i] {
			t.Errorf("Expected field '%s' got '%s'", string(exp[i]), fields[i])
		}
	}
}

func TestKeys3(t *testing.T) {
	json := `{
		"insert": {
			"created_at": "now",
			"test": { "type1": "a", "type2": "b" },
			"name": "Hello",
			"updated_at": "now",
			"description": "World"
		},
		"user": 123
	}`

	fields := Keys([]byte(json))

	exp := []string{
		"insert", "created_at", "test", "type1", "type2", "name", "updated_at", "description",
		"user",
	}

	if len(exp) != len(fields) {
		t.Errorf("Expected %d fields %d", len(exp), len(fields))
	}

	for i := range exp {
		if string(fields[i]) != exp[i] {
			t.Errorf("Expected field '%s' got '%s'", string(exp[i]), fields[i])
		}
	}
}

func BenchmarkGet(b *testing.B) {
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		Get([]byte(input1), [][]byte{[]byte("__twitter_id")})
	}
}

func BenchmarkFilter(b *testing.B) {
	var buf bytes.Buffer

	keys := []string{"id", "full_name", "embed", "email", "__twitter_id"}
	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		err := Filter(&buf, []byte(input2), keys)
		if err != nil {
			b.Fatal(err)
		}
		buf.Reset()
	}
}

func BenchmarkStrip(b *testing.B) {
	path := [][]byte{[]byte("data"), []byte("users")}
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		Strip([]byte(input3), path)
	}
}

func BenchmarkReplace(b *testing.B) {
	var buf bytes.Buffer

	from := []Field{
		{[]byte("__twitter_id"), []byte(`[{ "name": "hello" }, { "name": "world"}]`)},
		{[]byte("__twitter_id"), []byte(`"ABC123"`)},
	}

	to := []Field{
		{[]byte("__twitter_id"), []byte(`"1234567890"`)},
		{[]byte("some_list"), []byte(`[{"id":1,"embed":{"id":8}},{"id":2},{"id":3},{"id":4},{"id":5},{"id":6},{"id":7},{"id":8},{"id":9},{"id":10},{"id":11},{"id":12},{"id":13}]`)},
	}

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		err := Replace(&buf, []byte(input4), from, to)
		if err != nil {
			b.Fatal(err)
		}
		buf.Reset()
	}
}
122
jsn/keys.go
Normal file
@ -0,0 +1,122 @@
package jsn

// Keys returns the keys in b in document order. Arrays are walked only
// through their first element, so keys repeated across array rows are
// reported once.
func Keys(b []byte) [][]byte {
	res := make([][]byte, 0, 20)

	s, e, d := 0, 0, 0

	var k []byte
	state := expectValue

	st := NewStack()
	ae := 0

	for i := 0; i < len(b); i++ {
		if state == expectObjClose || state == expectListClose {
			switch b[i] {
			case '{', '[':
				d++
			case '}', ']':
				d--
			}
		}

		si := st.Peek()

		switch {
		case state == expectKey && si != nil && i >= si.ss:
			i = si.se + 1
			st.Pop()

		case state == expectKey && b[i] == '{':
			state = expectObjClose
			s = i
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			state = expectKey
			if ae != 0 {
				st.Push(skipInfo{i, ae})
				ae = 0
			}
			e = i
			i = s

		case state == expectKey && b[i] == '"':
			state = expectKeyClose
			s = i

		case state == expectKeyClose && b[i] == '"':
			state = expectColon
			k = b[(s + 1):i]

		case state == expectColon && b[i] == ':':
			state = expectValue

		case state == expectValue && b[i] == '"':
			state = expectString
			s = i

		case state == expectString && b[i] == '"':
			e = i

		case state == expectValue && b[i] == '{':
			state = expectObjClose
			s = i
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			state = expectKey
			e = i
			i = s

		case state == expectValue && b[i] == '[':
			state = expectListClose
			s = i
			d++

		case state == expectListClose && d == 0 && b[i] == ']':
			state = expectKey
			ae = i
			e = i
			i = s

		case state == expectValue && (b[i] >= '0' && b[i] <= '9'):
			state = expectNumClose
			s = i

		case state == expectNumClose &&
			((b[i] < '0' || b[i] > '9') &&
				(b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
			i--
			e = i

		case state == expectValue &&
			(b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
			state = expectBoolClose
			s = i

		case state == expectBoolClose && (b[i] == 'e' || b[i] == 'E'):
			e = i

		case state == expectValue && b[i] == 'n':
			state = expectNull

		case state == expectNull && b[i] == 'l':
			e = i
		}

		if e != 0 {
			if k != nil {
				res = append(res, k)
			}

			state = expectKey
			k = nil
			e = 0
		}

	}

	return res
}
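A small sketch of the behavior suggested by the skip stack above: only the first element of each array is descended into, so per-row keys are not duplicated in the result (hypothetical input; the `jsn` import path is assumed as before):

```
package main

import (
	"fmt"

	"github.com/dosco/super-graph/jsn" // assumed import path
)

func main() {
	json := []byte(`{"id":1,"posts":[{"title":"T1"},{"title":"T2"}]}`)

	// The second "posts" element is skipped, so "title" appears once.
	for _, k := range jsn.Keys(json) {
		fmt.Println(string(k)) // id, posts, title
	}
}
```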
164
jsn/replace.go
Normal file
@ -0,0 +1,164 @@
package jsn

import (
	"bytes"
	"errors"

	"github.com/cespare/xxhash/v2"
)

// Replace writes b to w, replacing every field that exactly matches a
// (key, value) pair in from with the field at the same index in to.
func Replace(w *bytes.Buffer, b []byte, from, to []Field) error {
	if len(from) != len(to) {
		return errors.New("'from' and 'to' must be of the same length")
	}

	h := xxhash.New()
	tmap := make(map[uint64]int, len(from))

	for i, f := range from {
		h.Write(f.Key)
		h.Write(f.Value)

		tmap[h.Sum64()] = i
		h.Reset()
	}

	s, e, d := 0, 0, 0

	state := expectKey
	ws, we := -1, len(b)

	for i := 0; i < len(b); i++ {
		// skip any left padding whitespace
		if ws == -1 && (b[i] == '{' || b[i] == '[') {
			ws = i
		}

		if state == expectObjClose || state == expectListClose {
			switch b[i] {
			case '{', '[':
				d++
			case '}', ']':
				d--
			}
		}

		switch {
		case state == expectKey && b[i] == '"':
			state = expectKeyClose
			s = i

		case state == expectKeyClose && b[i] == '"':
			state = expectColon
			h.Write(b[(s + 1):i])
			we = s

		case state == expectColon && b[i] == ':':
			state = expectValue

		case state == expectValue && b[i] == '"':
			state = expectString
			s = i

		case state == expectString && b[i] == '"':
			e = i

		case state == expectValue && b[i] == '[':
			state = expectListClose
			s = i
			d++

		case state == expectListClose && d == 0 && b[i] == ']':
			e = i

		case state == expectValue && b[i] == '{':
			state = expectObjClose
			s = i
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			e = i

		case state == expectValue && (b[i] >= '0' && b[i] <= '9'):
			state = expectNumClose
			s = i

		case state == expectNumClose &&
			((b[i] < '0' || b[i] > '9') &&
				(b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
			i--
			e = i

		case state == expectValue &&
			(b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
			state = expectBoolClose
			s = i

		case state == expectBoolClose && (b[i] == 'e' || b[i] == 'E'):
			e = i

		case state == expectValue && b[i] == 'n':
			state = expectNull

		case state == expectNull && b[i] == 'l':
			e = i
		}

		if e != 0 {
			e++

			h.Write(b[s:e])
			n, ok := tmap[h.Sum64()]
			h.Reset()

			if ok {
				if _, err := w.Write(b[ws:(we + 1)]); err != nil {
					return err
				}

				if len(to[n].Key) != 0 {
					var err error

					if _, err := w.Write(to[n].Key); err != nil {
						return err
					}
					if _, err := w.WriteString(`":`); err != nil {
						return err
					}
					if len(to[n].Value) != 0 {
						_, err = w.Write(to[n].Value)
					} else {
						_, err = w.WriteString("null")
					}
					if err != nil {
						return err
					}

					ws = e
				} else if b[e] == ',' {
					ws = e + 1
				} else {
					ws = e
				}
			}

			if !ok && (b[s] == '[' || b[s] == '{') {
				// the i++ in the for loop will add 1 so we account for that (s - 1)
				i = s - 1
			}

			state = expectKey
			we = len(b)
			e = 0
			d = 0
		}
	}

	if ws == -1 || (ws == 0 && we == len(b)) {
		w.Write(b)
	} else {
		w.Write(b[ws:we])
	}

	return nil
}
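Worth noting: the lookup hashes the key together with the current value, so only fields whose key and value both match a `from` entry are rewritten. A minimal sketch (hypothetical input; the `jsn` import path is assumed as before):

```
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/dosco/super-graph/jsn" // assumed import path
)

func main() {
	var buf bytes.Buffer

	from := []jsn.Field{{Key: []byte("id"), Value: []byte(`1`)}}
	to := []jsn.Field{{Key: []byte("id"), Value: []byte(`100`)}}

	// Only {"id":1} matches both key and value; {"id":2} is untouched.
	if err := jsn.Replace(&buf, []byte(`[{"id":1},{"id":2}]`), from, to); err != nil {
		log.Fatal(err)
	}

	fmt.Println(buf.String()) // [{"id":100},{"id":2}]
}
```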
51
jsn/stack.go
Normal file
@ -0,0 +1,51 @@
package jsn

type skipInfo struct {
	ss, se int
}

type Stack struct {
	stA [20]skipInfo
	st  []skipInfo
	top int
}

// Create a new Stack
func NewStack() *Stack {
	s := &Stack{top: -1}
	s.st = s.stA[:0]
	return s
}

// Return the number of items in the Stack
func (s *Stack) Len() int {
	return (s.top + 1)
}

// View the top item on the Stack
func (s *Stack) Peek() *skipInfo {
	if s.top == -1 {
		return nil
	}
	return &s.st[s.top]
}

// Pop the top item of the Stack and return it
func (s *Stack) Pop() *skipInfo {
	if s.top == -1 {
		return nil
	}

	s.top--
	return &s.st[(s.top + 1)]
}

// Push a value onto the top of the Stack
func (s *Stack) Push(value skipInfo) {
	s.top++
	if len(s.st) <= s.top {
		s.st = append(s.st, value)
	} else {
		s.st[s.top] = value
	}
}
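The slice is backed by a fixed 20-element array, so pushes stay allocation-free for typical nesting depths. Since `skipInfo` is unexported the stack can only be driven from inside the package; a minimal in-package sketch of the semantics (hypothetical test, not part of this change set):

```
package jsn

import "testing"

func TestStackSketch(t *testing.T) {
	st := NewStack()
	st.Push(skipInfo{ss: 5, se: 10})
	st.Push(skipInfo{ss: 20, se: 30})

	// Peek returns the top item without removing it.
	if st.Peek().se != 30 {
		t.Fatal("unexpected top of stack")
	}

	st.Pop() // removes {20, 30}
	st.Pop() // removes {5, 10}

	// Popping an empty stack yields nil rather than panicking.
	if st.Pop() != nil {
		t.Fatal("expected nil on empty stack")
	}
}
```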
107
jsn/strip.go
Normal file
@ -0,0 +1,107 @@
package jsn

import (
	"bytes"
)

// Strip walks the given key path from the root of b and returns the node
// found at the end of it; if the path cannot be matched, the original
// bytes are returned unchanged.
func Strip(b []byte, path [][]byte) []byte {
	s, e, d := 0, 0, 0

	ob := b
	pi := 0
	pm := false
	state := expectKey

	for i := 0; i < len(b); i++ {
		if state == expectObjClose || state == expectListClose {
			switch b[i] {
			case '{', '[':
				d++
			case '}', ']':
				d--
			}
		}

		switch {
		case state == expectKey && b[i] == '"':
			state = expectKeyClose
			s = i

		case state == expectKeyClose && b[i] == '"':
			state = expectColon
			if pi == len(path) {
				pi = 0
			}
			pm = bytes.Equal(b[(s+1):i], path[pi])
			if pm {
				pi++
			}

		case state == expectColon && b[i] == ':':
			state = expectValue

		case state == expectValue && b[i] == '"':
			state = expectString
			s = i

		case state == expectString && b[i] == '"':
			e = i

		case state == expectValue && b[i] == '[':
			state = expectListClose
			s = i
			d++

		case state == expectListClose && d == 0 && b[i] == ']':
			e = i

		case state == expectValue && b[i] == '{':
			state = expectObjClose
			s = i
			d++

		case state == expectObjClose && d == 0 && b[i] == '}':
			e = i

		case state == expectValue && (b[i] >= '0' && b[i] <= '9'):
			state = expectNumClose
			s = i

		case state == expectNumClose &&
			((b[i] < '0' || b[i] > '9') &&
				(b[i] != '.' && b[i] != 'e' && b[i] != 'E' && b[i] != '+' && b[i] != '-')):
			i--
			e = i

		case state == expectValue &&
			(b[i] == 'f' || b[i] == 'F' || b[i] == 't' || b[i] == 'T'):
			state = expectBoolClose
			s = i

		case state == expectBoolClose && (b[i] == 'e' || b[i] == 'E'):
			e = i

		case state == expectValue && b[i] == 'n':
			state = expectNull

		case state == expectNull && b[i] == 'l':
			e = i
		}

		if e != 0 {
			if pm && (b[s] == '[' || b[s] == '{') {
				b = b[s:(e + 1)]
				i = 0

				if pi == len(path) {
					return b
				}
			}

			state = expectKey
			e = 0
		}
	}

	return ob
}
37
jsn/tree.go
Normal file
@ -0,0 +1,37 @@
package jsn

import (
	"bytes"
	"encoding/json"
)

func Tree(v []byte) (map[string]interface{}, bool, error) {
	dec := json.NewDecoder(bytes.NewReader(v))
	array := false

	// Check the first non-space byte to find out if this is an array.
	for i := range v {
		if v[i] != ' ' {
			array = (v[i] == '[')
			break
		}
	}

	if array {
		// Read the opening bracket so the decoder is positioned at the
		// first element.
		if _, err := dec.Token(); err != nil {
			return nil, false, err
		}
	}

	// Decode the first value (or the first element of the array).
	var m map[string]interface{}
	err := dec.Decode(&m)
	if err != nil {
		return nil, false, err
	}

	return m, array, nil
}
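A quick hedged example of Tree with illustrative input: it decodes only the first JSON value (or the first element of an array) and reports whether the input was an array.

	m, isArray, err := jsn.Tree([]byte(`[{"name": "a"}, {"name": "b"}]`))
	if err != nil {
		// handle malformed JSON
	}
	// m == map[string]interface{}{"name": "a"}, isArray == true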
386
jsn/validate.go
Normal file
@ -0,0 +1,386 @@
package jsn

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"unsafe"
)

// Validate validates JSON s.
func Validate(s string) error {
	s = skipWS(s)

	tail, err := validateValue(s)
	if err != nil {
		return fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail))
	}
	tail = skipWS(tail)
	if len(tail) > 0 {
		return fmt.Errorf("unexpected tail: %q", startEndString(tail))
	}
	return nil
}

// ValidateBytes validates JSON b.
func ValidateBytes(b []byte) error {
	return Validate(b2s(b))
}

func validateValue(s string) (string, error) {
	if len(s) == 0 {
		return s, fmt.Errorf("cannot parse empty string")
	}

	if s[0] == '{' {
		tail, err := validateObject(s[1:])
		if err != nil {
			return tail, fmt.Errorf("cannot parse object: %s", err)
		}
		return tail, nil
	}
	if s[0] == '[' {
		tail, err := validateArray(s[1:])
		if err != nil {
			return tail, fmt.Errorf("cannot parse array: %s", err)
		}
		return tail, nil
	}
	if s[0] == '"' {
		sv, tail, err := validateString(s[1:])
		if err != nil {
			return tail, fmt.Errorf("cannot parse string: %s", err)
		}
		// Scan the string for control chars.
		for i := 0; i < len(sv); i++ {
			if sv[i] < 0x20 {
				return tail, fmt.Errorf("string cannot contain control char 0x%02X", sv[i])
			}
		}
		return tail, nil
	}
	if s[0] == 't' {
		if len(s) < len("true") || s[:len("true")] != "true" {
			return s, fmt.Errorf("unexpected value found: %q", s)
		}
		return s[len("true"):], nil
	}
	if s[0] == 'f' {
		if len(s) < len("false") || s[:len("false")] != "false" {
			return s, fmt.Errorf("unexpected value found: %q", s)
		}
		return s[len("false"):], nil
	}
	if s[0] == 'n' {
		if len(s) < len("null") || s[:len("null")] != "null" {
			return s, fmt.Errorf("unexpected value found: %q", s)
		}
		return s[len("null"):], nil
	}

	tail, err := validateNumber(s)
	if err != nil {
		return tail, fmt.Errorf("cannot parse number: %s", err)
	}
	return tail, nil
}

func validateArray(s string) (string, error) {
	s = skipWS(s)
	if len(s) == 0 {
		return s, fmt.Errorf("missing ']'")
	}
	if s[0] == ']' {
		return s[1:], nil
	}

	for {
		var err error

		s = skipWS(s)
		s, err = validateValue(s)
		if err != nil {
			return s, fmt.Errorf("cannot parse array value: %s", err)
		}

		s = skipWS(s)
		if len(s) == 0 {
			return s, fmt.Errorf("unexpected end of array")
		}
		if s[0] == ',' {
			s = s[1:]
			continue
		}
		if s[0] == ']' {
			s = s[1:]
			return s, nil
		}
		return s, fmt.Errorf("missing ',' after array value")
	}
}

func validateObject(s string) (string, error) {
	s = skipWS(s)
	if len(s) == 0 {
		return s, fmt.Errorf("missing '}'")
	}
	if s[0] == '}' {
		return s[1:], nil
	}

	for {
		var err error

		// Parse key.
		s = skipWS(s)
		if len(s) == 0 || s[0] != '"' {
			return s, fmt.Errorf(`cannot find opening '"' for object key`)
		}

		var key string
		key, s, err = validateKey(s[1:])
		if err != nil {
			return s, fmt.Errorf("cannot parse object key: %s", err)
		}
		// Scan the key for control chars.
		for i := 0; i < len(key); i++ {
			if key[i] < 0x20 {
				return s, fmt.Errorf("object key cannot contain control char 0x%02X", key[i])
			}
		}
		s = skipWS(s)
		if len(s) == 0 || s[0] != ':' {
			return s, fmt.Errorf("missing ':' after object key")
		}
		s = s[1:]

		// Parse value
		s = skipWS(s)
		s, err = validateValue(s)
		if err != nil {
			return s, fmt.Errorf("cannot parse object value: %s", err)
		}
		s = skipWS(s)
		if len(s) == 0 {
			return s, fmt.Errorf("unexpected end of object")
		}
		if s[0] == ',' {
			s = s[1:]
			continue
		}
		if s[0] == '}' {
			return s[1:], nil
		}
		return s, fmt.Errorf("missing ',' after object value")
	}
}

// validateKey is similar to validateString, but is optimized
// for typical object keys, which are quite small and have no escape sequences.
func validateKey(s string) (string, string, error) {
	for i := 0; i < len(s); i++ {
		if s[i] == '"' {
			// Fast path - the key doesn't contain escape sequences.
			return s[:i], s[i+1:], nil
		}
		if s[i] == '\\' {
			// Slow path - the key contains escape sequences.
			return validateString(s)
		}
	}
	return "", s, fmt.Errorf(`missing closing '"'`)
}

func validateString(s string) (string, string, error) {
	// Try fast path - a string without escape sequences.
	if n := strings.IndexByte(s, '"'); n >= 0 && strings.IndexByte(s[:n], '\\') < 0 {
		return s[:n], s[n+1:], nil
	}

	// Slow path - escape sequences are present.
	rs, tail, err := parseRawString(s)
	if err != nil {
		return rs, tail, err
	}
	for {
		n := strings.IndexByte(rs, '\\')
		if n < 0 {
			return rs, tail, nil
		}
		n++
		if n >= len(rs) {
			return rs, tail, fmt.Errorf("BUG: parseRawString returned invalid string with trailing backslash: %q", rs)
		}
		ch := rs[n]
		rs = rs[n+1:]
		switch ch {
		case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
			// Valid escape sequences - see http://json.org/
			break
		case 'u':
			if len(rs) < 4 {
				return rs, tail, fmt.Errorf(`too short escape sequence: \u%s`, rs)
			}
			xs := rs[:4]
			_, err := strconv.ParseUint(xs, 16, 16)
			if err != nil {
				return rs, tail, fmt.Errorf(`invalid escape sequence \u%s: %s`, xs, err)
			}
			rs = rs[4:]
		default:
			return rs, tail, fmt.Errorf(`unknown escape sequence \%c`, ch)
		}
	}
}

func validateNumber(s string) (string, error) {
	if len(s) == 0 {
		return s, fmt.Errorf("zero-length number")
	}
	if s[0] == '-' {
		s = s[1:]
		if len(s) == 0 {
			return s, fmt.Errorf("missing number after minus")
		}
	}
	i := 0
	for i < len(s) {
		if s[i] < '0' || s[i] > '9' {
			break
		}
		i++
	}
	if i <= 0 {
		return s, fmt.Errorf("expecting 0..9 digit, got %c", s[0])
	}
	if s[0] == '0' && i != 1 {
		return s, fmt.Errorf("unexpected number starting from 0")
	}
	if i >= len(s) {
		return "", nil
	}
	if s[i] == '.' {
		// Validate fractional part
		s = s[i+1:]
		if len(s) == 0 {
			return s, fmt.Errorf("missing fractional part")
		}
		i = 0
		for i < len(s) {
			if s[i] < '0' || s[i] > '9' {
				break
			}
			i++
		}
		if i == 0 {
			return s, fmt.Errorf("expecting 0..9 digit in fractional part, got %c", s[0])
		}
		if i >= len(s) {
			return "", nil
		}
	}
	if s[i] == 'e' || s[i] == 'E' {
		// Validate exponent part
		s = s[i+1:]
		if len(s) == 0 {
			return s, fmt.Errorf("missing exponent part")
		}
		if s[0] == '-' || s[0] == '+' {
			s = s[1:]
			if len(s) == 0 {
				return s, fmt.Errorf("missing exponent part")
			}
		}
		i = 0
		for i < len(s) {
			if s[i] < '0' || s[i] > '9' {
				break
			}
			i++
		}
		if i == 0 {
			return s, fmt.Errorf("expecting 0..9 digit in exponent part, got %c", s[0])
		}
		if i >= len(s) {
			return "", nil
		}
	}
	return s[i:], nil
}

func skipWS(s string) string {
	if len(s) == 0 || s[0] > 0x20 {
		// Fast path.
		return s
	}
	return skipWSSlow(s)
}

func skipWSSlow(s string) string {
	if len(s) == 0 || s[0] != 0x20 && s[0] != 0x0A && s[0] != 0x09 && s[0] != 0x0D {
		return s
	}
	for i := 1; i < len(s); i++ {
		if s[i] != 0x20 && s[i] != 0x0A && s[i] != 0x09 && s[i] != 0x0D {
			return s[i:]
		}
	}
	return ""
}

func b2s(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func s2b(s string) []byte {
	strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	var sh reflect.SliceHeader
	sh.Data = strh.Data
	sh.Len = strh.Len
	sh.Cap = strh.Len
	return *(*[]byte)(unsafe.Pointer(&sh))
}

const maxStartEndStringLen = 80

func startEndString(s string) string {
	if len(s) <= maxStartEndStringLen {
		return s
	}
	start := s[:40]
	end := s[len(s)-40:]
	return start + "..." + end
}

func parseRawString(s string) (string, string, error) {
	n := strings.IndexByte(s, '"')
	if n < 0 {
		return s, "", fmt.Errorf(`missing closing '"'`)
	}
	if n == 0 || s[n-1] != '\\' {
		// Fast path. No escaped ".
		return s[:n], s[n+1:], nil
	}

	// Slow path - possible escaped " found.
	ss := s
	for {
		i := n - 1
		for i > 0 && s[i-1] == '\\' {
			i--
		}
		if uint(n-i)%2 == 0 {
			return ss[:len(ss)-len(s)+n], s[n+1:], nil
		}
		s = s[n+1:]

		n = strings.IndexByte(s, '"')
		if n < 0 {
			return ss, "", fmt.Errorf(`missing closing '"'`)
		}
		if n == 0 || s[n-1] != '\\' {
			return ss[:len(ss)-len(s)+n], s[n+1:], nil
		}
	}
}
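For reference, Validate and ValidateBytes are the entry points; a minimal sketch with illustrative inputs:

	if err := jsn.Validate(`{"id": 1, "tags": ["a", "b"]}`); err != nil {
		// err describes the failure and quotes the unparsed tail
	}

	err := jsn.ValidateBytes([]byte(`{"id": `)) // truncated input
	// err != nil: "cannot parse JSON: ..."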
12
migrate/connection_settings_test.go.example
Normal file
@ -0,0 +1,12 @@
package migrate_test

import (
	"github.com/jackc/pgx"
)

var defaultConnectionParameters *pgx.ConnConfig = &pgx.ConnConfig{
	// Host: "127.0.0.1",
	User:     "tern",
	Password: "secret", // Password is usually not needed when using socket
	Database: "tern_migrate_test",
}
373
migrate/migrate.go
Normal file
@ -0,0 +1,373 @@
package migrate

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"text/template"

	"github.com/jackc/pgx/v4"
	"github.com/pkg/errors"
)

var migrationPattern = regexp.MustCompile(`\A(\d+)_.+\.sql\z`)

var ErrNoFwMigration = errors.Errorf("no sql in forward migration step")

type BadVersionError string

func (e BadVersionError) Error() string {
	return string(e)
}

type IrreversibleMigrationError struct {
	m *Migration
}

func (e IrreversibleMigrationError) Error() string {
	return fmt.Sprintf("Irreversible migration: %d - %s", e.m.Sequence, e.m.Name)
}

type NoMigrationsFoundError struct {
	Path string
}

func (e NoMigrationsFoundError) Error() string {
	return fmt.Sprintf("No migrations found at %s", e.Path)
}

type MigrationPgError struct {
	Sql   string
	Error error
}

type Migration struct {
	Sequence int32
	Name     string
	UpSQL    string
	DownSQL  string
}

type MigratorOptions struct {
	// DisableTx causes the Migrator not to run migrations in a transaction.
	DisableTx bool
	// MigratorFS is the interface used for collecting the migrations.
	MigratorFS MigratorFS
}

type Migrator struct {
	conn         *pgx.Conn
	versionTable string
	options      *MigratorOptions
	Migrations   []*Migration
	OnStart      func(int32, string, string, string) // OnStart is called when a migration is run with the sequence, name, direction, and SQL
	Data         map[string]interface{}              // Data available to use in migrations
}

func NewMigrator(conn *pgx.Conn, versionTable string) (m *Migrator, err error) {
	return NewMigratorEx(conn, versionTable, &MigratorOptions{MigratorFS: defaultMigratorFS{}})
}

func NewMigratorEx(conn *pgx.Conn, versionTable string, opts *MigratorOptions) (m *Migrator, err error) {
	m = &Migrator{conn: conn, versionTable: versionTable, options: opts}
	err = m.ensureSchemaVersionTableExists()
	m.Migrations = make([]*Migration, 0)
	m.Data = make(map[string]interface{})
	return
}

type MigratorFS interface {
	ReadDir(dirname string) ([]os.FileInfo, error)
	ReadFile(filename string) ([]byte, error)
	Glob(pattern string) (matches []string, err error)
}

type defaultMigratorFS struct{}

func (defaultMigratorFS) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}

func (defaultMigratorFS) ReadFile(filename string) ([]byte, error) {
	return ioutil.ReadFile(filename)
}

func (defaultMigratorFS) Glob(pattern string) ([]string, error) {
	return filepath.Glob(pattern)
}

func FindMigrationsEx(path string, fs MigratorFS) ([]string, error) {
	path = strings.TrimRight(path, string(filepath.Separator))

	fileInfos, err := fs.ReadDir(path)
	if err != nil {
		return nil, err
	}

	paths := make([]string, 0, len(fileInfos))
	for _, fi := range fileInfos {
		if fi.IsDir() {
			continue
		}

		matches := migrationPattern.FindStringSubmatch(fi.Name())
		if len(matches) != 2 {
			continue
		}

		n, err := strconv.ParseInt(matches[1], 10, 32)
		if err != nil {
			// The regexp already validated that the prefix is all digits so this *should* never fail
			return nil, err
		}

		mcount := len(paths) + 100

		if n < int64(mcount) {
			return nil, fmt.Errorf("Duplicate migration %d", n)
		}

		if int64(mcount) < n {
			return nil, fmt.Errorf("Missing migration %d", mcount)
		}

		paths = append(paths, filepath.Join(path, fi.Name()))
	}

	return paths, nil
}

func FindMigrations(path string) ([]string, error) {
	return FindMigrationsEx(path, defaultMigratorFS{})
}

func (m *Migrator) LoadMigrations(path string) error {
	path = strings.TrimRight(path, string(filepath.Separator))

	mainTmpl := template.New("main")
	sharedPaths, err := m.options.MigratorFS.Glob(filepath.Join(path, "*", "*.sql"))
	if err != nil {
		return err
	}

	for _, p := range sharedPaths {
		body, err := m.options.MigratorFS.ReadFile(p)
		if err != nil {
			return err
		}

		name := strings.Replace(p, path+string(filepath.Separator), "", 1)
		_, err = mainTmpl.New(name).Parse(string(body))
		if err != nil {
			return err
		}
	}

	paths, err := FindMigrationsEx(path, m.options.MigratorFS)
	if err != nil {
		return err
	}

	if len(paths) == 0 {
		return NoMigrationsFoundError{Path: path}
	}

	for _, p := range paths {
		body, err := m.options.MigratorFS.ReadFile(p)
		if err != nil {
			return err
		}

		pieces := strings.SplitN(string(body), "---- create above / drop below ----", 2)
		var upSQL, downSQL string
		upSQL = strings.TrimSpace(pieces[0])
		upSQL, err = m.evalMigration(mainTmpl.New(filepath.Base(p)+" up"), upSQL)
		if err != nil {
			return err
		}
		// Make sure there is SQL in the forward migration step.
		containsSQL := false
		for _, v := range strings.Split(upSQL, "\n") {
			// Only account for regular single line comment, empty line and space/comment combination
			cleanString := strings.TrimSpace(v)
			if len(cleanString) != 0 &&
				!strings.HasPrefix(cleanString, "--") {
				containsSQL = true
				break
			}
		}
		if !containsSQL {
			return ErrNoFwMigration
		}

		if len(pieces) == 2 {
			downSQL = strings.TrimSpace(pieces[1])
			downSQL, err = m.evalMigration(mainTmpl.New(filepath.Base(p)+" down"), downSQL)
			if err != nil {
				return err
			}
		}

		m.AppendMigration(filepath.Base(p), upSQL, downSQL)
	}

	return nil
}

func (m *Migrator) evalMigration(tmpl *template.Template, sql string) (string, error) {
	tmpl, err := tmpl.Parse(sql)
	if err != nil {
		return "", err
	}

	var buf bytes.Buffer
	err = tmpl.Execute(&buf, m.Data)
	if err != nil {
		return "", err
	}

	return buf.String(), nil
}

func (m *Migrator) AppendMigration(name, upSQL, downSQL string) {
	m.Migrations = append(
		m.Migrations,
		&Migration{
			Sequence: int32(len(m.Migrations)) + 1,
			Name:     name,
			UpSQL:    upSQL,
			DownSQL:  downSQL,
		})
	return
}

// Migrate runs pending migrations
// It calls m.OnStart when it begins a migration
func (m *Migrator) Migrate() error {
	return m.MigrateTo(int32(len(m.Migrations)))
}

// MigrateTo migrates to targetVersion
func (m *Migrator) MigrateTo(targetVersion int32) (err error) {
	ctx := context.Background()
	// Lock to ensure multiple migrations cannot occur simultaneously
	lockNum := int64(9628173550095224) // arbitrary random number
	if _, lockErr := m.conn.Exec(ctx, "select pg_advisory_lock($1)", lockNum); lockErr != nil {
		return lockErr
	}
	defer func() {
		_, unlockErr := m.conn.Exec(ctx, "select pg_advisory_unlock($1)", lockNum)
		if err == nil && unlockErr != nil {
			err = unlockErr
		}
	}()

	currentVersion, err := m.GetCurrentVersion()
	if err != nil {
		return err
	}

	if targetVersion < 0 || int32(len(m.Migrations)) < targetVersion {
		errMsg := fmt.Sprintf("destination version %d is outside the valid versions of 0 to %d", targetVersion, len(m.Migrations))
		return BadVersionError(errMsg)
	}

	if currentVersion < 0 || int32(len(m.Migrations)) < currentVersion {
		errMsg := fmt.Sprintf("current version %d is outside the valid versions of 0 to %d", currentVersion, len(m.Migrations))
		return BadVersionError(errMsg)
	}

	var direction int32
	if currentVersion < targetVersion {
		direction = 1
	} else {
		direction = -1
	}

	for currentVersion != targetVersion {
		var current *Migration
		var sql, directionName string
		var sequence int32
		if direction == 1 {
			current = m.Migrations[currentVersion]
			sequence = current.Sequence
			sql = current.UpSQL
			directionName = "up"
		} else {
			current = m.Migrations[currentVersion-1]
			sequence = current.Sequence - 1
			sql = current.DownSQL
			directionName = "down"
			if current.DownSQL == "" {
				return IrreversibleMigrationError{m: current}
			}
		}

		ctx := context.Background()

		tx, err := m.conn.Begin(ctx)
		if err != nil {
			return err
		}
		defer tx.Rollback(ctx)

		// Fire on start callback
		if m.OnStart != nil {
			m.OnStart(current.Sequence, current.Name, directionName, sql)
		}

		// Execute the migration
		_, err = tx.Exec(ctx, sql)
		if err != nil {
			// if err, ok := err.(pgx.PgError); ok {
			// 	return MigrationPgError{Sql: sql, PgError: err}
			// }
			return err
		}

		// Reset all database connection settings. Important to do before updating version as search_path may have been changed.
		tx.Exec(ctx, "reset all")

		// Record the new schema version
		_, err = tx.Exec(ctx, "update "+m.versionTable+" set version=$1", sequence)
		if err != nil {
			return err
		}

		err = tx.Commit(ctx)
		if err != nil {
			return err
		}

		currentVersion = currentVersion + direction
	}

	return nil
}

func (m *Migrator) GetCurrentVersion() (v int32, err error) {
	ctx := context.Background()

	err = m.conn.QueryRow(ctx, "select version from "+m.versionTable).Scan(&v)
	return v, err
}

func (m *Migrator) ensureSchemaVersionTableExists() (err error) {
	ctx := context.Background()

	_, err = m.conn.Exec(ctx, fmt.Sprintf(`
    create table if not exists %s(version int4 not null);

    insert into %s(version)
    select 0
    where 0=(select count(*) from %s);
  `, m.versionTable, m.versionTable, m.versionTable))

	return err
}
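A hedged end-to-end sketch of the Migrator API above; the connection string, migrations directory, and import path for this package are illustrative assumptions.

	package main

	import (
		"context"
		"log"

		"github.com/dosco/super-graph/migrate" // assumed import path for the package above
		"github.com/jackc/pgx/v4"
	)

	func main() {
		conn, err := pgx.Connect(context.Background(), "postgres://localhost/app_development")
		if err != nil {
			log.Fatal(err)
		}

		m, err := migrate.NewMigrator(conn, "schema_version")
		if err != nil {
			log.Fatal(err)
		}
		m.OnStart = func(seq int32, name, dir, sql string) {
			log.Printf("%s migration: %d %s", dir, seq, name)
		}
		if err := m.LoadMigrations("./migrations"); err != nil {
			log.Fatal(err)
		}
		if err := m.Migrate(); err != nil { // or m.MigrateTo(n) for a specific version
			log.Fatal(err)
		}
	}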
352
migrate/migrate_test.go
Normal file
@ -0,0 +1,352 @@
package migrate_test

import (
	. "gopkg.in/check.v1"
)

/*

type MigrateSuite struct {
	conn *pgx.Conn
}

func Test(t *testing.T) { TestingT(t) }

var _ = Suite(&MigrateSuite{})

var versionTable string = "schema_version_non_default"

func (s *MigrateSuite) SetUpTest(c *C) {
	var err error
	s.conn, err = pgx.Connect(*defaultConnectionParameters)
	c.Assert(err, IsNil)

	s.cleanupSampleMigrator(c)
}

func (s *MigrateSuite) currentVersion(c *C) int32 {
	var n int32
	err := s.conn.QueryRow("select version from " + versionTable).Scan(&n)
	c.Assert(err, IsNil)
	return n
}

func (s *MigrateSuite) Exec(c *C, sql string, arguments ...interface{}) pgx.CommandTag {
	commandTag, err := s.conn.Exec(sql, arguments...)
	c.Assert(err, IsNil)
	return commandTag
}

func (s *MigrateSuite) tableExists(c *C, tableName string) bool {
	var exists bool
	err := s.conn.QueryRow(
		"select exists(select 1 from information_schema.tables where table_catalog=$1 and table_name=$2)",
		defaultConnectionParameters.Database,
		tableName,
	).Scan(&exists)
	c.Assert(err, IsNil)
	return exists
}

func (s *MigrateSuite) createEmptyMigrator(c *C) *migrate.Migrator {
	var err error
	m, err := migrate.NewMigrator(s.conn, versionTable)
	c.Assert(err, IsNil)
	return m
}

func (s *MigrateSuite) createSampleMigrator(c *C) *migrate.Migrator {
	m := s.createEmptyMigrator(c)
	m.AppendMigration("Create t1", "create table t1(id serial);", "drop table t1;")
	m.AppendMigration("Create t2", "create table t2(id serial);", "drop table t2;")
	m.AppendMigration("Create t3", "create table t3(id serial);", "drop table t3;")
	return m
}

func (s *MigrateSuite) cleanupSampleMigrator(c *C) {
	tables := []string{versionTable, "t1", "t2", "t3"}
	for _, table := range tables {
		s.Exec(c, "drop table if exists "+table)
	}
}

func (s *MigrateSuite) TestNewMigrator(c *C) {
	var m *migrate.Migrator
	var err error

	// Initial run
	m, err = migrate.NewMigrator(s.conn, versionTable)
	c.Assert(err, IsNil)

	// Creates version table
	schemaVersionExists := s.tableExists(c, versionTable)
	c.Assert(schemaVersionExists, Equals, true)

	// Succeeds when version table is already created
	m, err = migrate.NewMigrator(s.conn, versionTable)
	c.Assert(err, IsNil)

	initialVersion, err := m.GetCurrentVersion()
	c.Assert(err, IsNil)
	c.Assert(initialVersion, Equals, int32(0))
}

func (s *MigrateSuite) TestAppendMigration(c *C) {
	m := s.createEmptyMigrator(c)

	name := "Create t"
	upSQL := "create t..."
	downSQL := "drop t..."
	m.AppendMigration(name, upSQL, downSQL)

	c.Assert(len(m.Migrations), Equals, 1)
	c.Assert(m.Migrations[0].Name, Equals, name)
	c.Assert(m.Migrations[0].UpSQL, Equals, upSQL)
	c.Assert(m.Migrations[0].DownSQL, Equals, downSQL)
}

func (s *MigrateSuite) TestLoadMigrationsMissingDirectory(c *C) {
	m := s.createEmptyMigrator(c)
	err := m.LoadMigrations("testdata/missing")
	c.Assert(err, ErrorMatches, "open testdata/missing: no such file or directory")
}

func (s *MigrateSuite) TestLoadMigrationsEmptyDirectory(c *C) {
	m := s.createEmptyMigrator(c)
	err := m.LoadMigrations("testdata/empty")
	c.Assert(err, ErrorMatches, "No migrations found at testdata/empty")
}

func (s *MigrateSuite) TestFindMigrationsWithGaps(c *C) {
	_, err := migrate.FindMigrations("testdata/gap")
	c.Assert(err, ErrorMatches, "Missing migration 2")
}

func (s *MigrateSuite) TestFindMigrationsWithDuplicate(c *C) {
	_, err := migrate.FindMigrations("testdata/duplicate")
	c.Assert(err, ErrorMatches, "Duplicate migration 2")
}

func (s *MigrateSuite) TestLoadMigrations(c *C) {
	m := s.createEmptyMigrator(c)
	m.Data = map[string]interface{}{"prefix": "foo"}
	err := m.LoadMigrations("testdata/sample")
	c.Assert(err, IsNil)
	c.Assert(m.Migrations, HasLen, 5)

	c.Check(m.Migrations[0].Name, Equals, "001_create_t1.sql")
	c.Check(m.Migrations[0].UpSQL, Equals, `create table t1(
  id serial primary key
);`)
	c.Check(m.Migrations[0].DownSQL, Equals, "drop table t1;")

	c.Check(m.Migrations[1].Name, Equals, "002_create_t2.sql")
	c.Check(m.Migrations[1].UpSQL, Equals, `create table t2(
  id serial primary key
);`)
	c.Check(m.Migrations[1].DownSQL, Equals, "drop table t2;")

	c.Check(m.Migrations[2].Name, Equals, "003_irreversible.sql")
	c.Check(m.Migrations[2].UpSQL, Equals, "drop table t2;")
	c.Check(m.Migrations[2].DownSQL, Equals, "")

	c.Check(m.Migrations[3].Name, Equals, "004_data_interpolation.sql")
	c.Check(m.Migrations[3].UpSQL, Equals, "create table foo_bar(id serial primary key);")
	c.Check(m.Migrations[3].DownSQL, Equals, "drop table foo_bar;")
}

func (s *MigrateSuite) TestLoadMigrationsNoForward(c *C) {
	var err error
	m, err := migrate.NewMigrator(s.conn, versionTable)
	c.Assert(err, IsNil)

	m.Data = map[string]interface{}{"prefix": "foo"}
	err = m.LoadMigrations("testdata/noforward")
	c.Assert(err, Equals, migrate.ErrNoFwMigration)
}

func (s *MigrateSuite) TestMigrate(c *C) {
	m := s.createSampleMigrator(c)

	err := m.Migrate()
	c.Assert(err, IsNil)
	currentVersion := s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(3))
}

func (s *MigrateSuite) TestMigrateToLifeCycle(c *C) {
	m := s.createSampleMigrator(c)

	var onStartCallUpCount int
	var onStartCallDownCount int
	m.OnStart = func(_ int32, _, direction, _ string) {
		switch direction {
		case "up":
			onStartCallUpCount++
		case "down":
			onStartCallDownCount++
		default:
			c.Fatalf("Unexpected direction: %s", direction)
		}
	}

	// Migrate from 0 up to 1
	err := m.MigrateTo(1)
	c.Assert(err, IsNil)
	currentVersion := s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(1))
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, false)
	c.Assert(s.tableExists(c, "t3"), Equals, false)
	c.Assert(onStartCallUpCount, Equals, 1)
	c.Assert(onStartCallDownCount, Equals, 0)

	// Migrate from 1 up to 3
	err = m.MigrateTo(3)
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(3))
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, true)
	c.Assert(s.tableExists(c, "t3"), Equals, true)
	c.Assert(onStartCallUpCount, Equals, 3)
	c.Assert(onStartCallDownCount, Equals, 0)

	// Migrate from 3 to 3 is no-op
	err = m.MigrateTo(3)
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, true)
	c.Assert(s.tableExists(c, "t3"), Equals, true)
	c.Assert(onStartCallUpCount, Equals, 3)
	c.Assert(onStartCallDownCount, Equals, 0)

	// Migrate from 3 down to 1
	err = m.MigrateTo(1)
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(1))
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, false)
	c.Assert(s.tableExists(c, "t3"), Equals, false)
	c.Assert(onStartCallUpCount, Equals, 3)
	c.Assert(onStartCallDownCount, Equals, 2)

	// Migrate from 1 down to 0
	err = m.MigrateTo(0)
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(0))
	c.Assert(s.tableExists(c, "t1"), Equals, false)
	c.Assert(s.tableExists(c, "t2"), Equals, false)
	c.Assert(s.tableExists(c, "t3"), Equals, false)
	c.Assert(onStartCallUpCount, Equals, 3)
	c.Assert(onStartCallDownCount, Equals, 3)

	// Migrate back up to 3
	err = m.MigrateTo(3)
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(3))
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, true)
	c.Assert(s.tableExists(c, "t3"), Equals, true)
	c.Assert(onStartCallUpCount, Equals, 6)
	c.Assert(onStartCallDownCount, Equals, 3)
}

func (s *MigrateSuite) TestMigrateToBoundaries(c *C) {
	m := s.createSampleMigrator(c)

	// Migrate to -1 is error
	err := m.MigrateTo(-1)
	c.Assert(err, ErrorMatches, "destination version -1 is outside the valid versions of 0 to 3")

	// Migrate past end is error
	err = m.MigrateTo(int32(len(m.Migrations)) + 1)
	c.Assert(err, ErrorMatches, "destination version 4 is outside the valid versions of 0 to 3")

	// When schema version says it is negative
	s.Exec(c, "update "+versionTable+" set version=-1")
	err = m.MigrateTo(int32(1))
	c.Assert(err, ErrorMatches, "current version -1 is outside the valid versions of 0 to 3")

	// When schema version says it is past the end
	s.Exec(c, "update "+versionTable+" set version=4")
	err = m.MigrateTo(int32(1))
	c.Assert(err, ErrorMatches, "current version 4 is outside the valid versions of 0 to 3")
}

func (s *MigrateSuite) TestMigrateToIrreversible(c *C) {
	m := s.createEmptyMigrator(c)
	m.AppendMigration("Foo", "drop table if exists t3", "")

	err := m.MigrateTo(1)
	c.Assert(err, IsNil)

	err = m.MigrateTo(0)
	c.Assert(err, ErrorMatches, "Irreversible migration: 1 - Foo")
}

func (s *MigrateSuite) TestMigrateToDisableTx(c *C) {
	m, err := migrate.NewMigratorEx(s.conn, versionTable, &migrate.MigratorOptions{DisableTx: true})
	c.Assert(err, IsNil)
	m.AppendMigration("Create t1", "create table t1(id serial);", "drop table t1;")
	m.AppendMigration("Create t2", "create table t2(id serial);", "drop table t2;")
	m.AppendMigration("Create t3", "create table t3(id serial);", "drop table t3;")

	tx, err := s.conn.Begin()
	c.Assert(err, IsNil)

	err = m.MigrateTo(3)
	c.Assert(err, IsNil)
	currentVersion := s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(3))
	c.Assert(s.tableExists(c, "t1"), Equals, true)
	c.Assert(s.tableExists(c, "t2"), Equals, true)
	c.Assert(s.tableExists(c, "t3"), Equals, true)

	err = tx.Rollback()
	c.Assert(err, IsNil)
	currentVersion = s.currentVersion(c)
	c.Assert(currentVersion, Equals, int32(0))
	c.Assert(s.tableExists(c, "t1"), Equals, false)
	c.Assert(s.tableExists(c, "t2"), Equals, false)
	c.Assert(s.tableExists(c, "t3"), Equals, false)
}

func Example_OnStartMigrationProgressLogging() {
	conn, err := pgx.Connect(*defaultConnectionParameters)
	if err != nil {
		fmt.Printf("Unable to establish connection: %v", err)
		return
	}

	// Clear any previous runs
	if _, err = conn.Exec("drop table if exists schema_version"); err != nil {
		fmt.Printf("Unable to drop schema_version table: %v", err)
		return
	}

	var m *migrate.Migrator
	m, err = migrate.NewMigrator(conn, "schema_version")
	if err != nil {
		fmt.Printf("Unable to create migrator: %v", err)
		return
	}

	m.OnStart = func(_ int32, name, direction, _ string) {
		fmt.Printf("Migrating %s: %s", direction, name)
	}

	m.AppendMigration("create a table", "create temporary table foo(id serial primary key)", "")

	if err = m.Migrate(); err != nil {
		fmt.Printf("Unexpected failure migrating: %v", err)
		return
	}
	// Output:
	// Migrating up: create a table
}
*/
7
migrate/testdata/duplicate/001_create_t1.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table t1(
  id serial primary key
);

---- create above / drop below ----

drop table t1;
7
migrate/testdata/duplicate/002_create_t2.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table t2(
  id serial primary key
);

---- create above / drop below ----

drop table t2;
7
migrate/testdata/duplicate/002_duplicate.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table duplicate(
  id serial primary key
);

---- create above / drop below ----

drop table duplicate;
7
migrate/testdata/gap/001_create_t1.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table t1(
  id serial primary key
);

---- create above / drop below ----

drop table t1;
1
migrate/testdata/gap/003_irreversible.sql
vendored
Normal file
@ -0,0 +1 @@
drop table t2;
7
migrate/testdata/noforward/001_create_no_forward.sql
vendored
Normal file
@ -0,0 +1,7 @@
-- no SQL here
-- nor here, just all comments.
 -- comment with space before

---- create above / drop below ----

drop table t1;
7
migrate/testdata/sample/001_create_t1.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table t1(
  id serial primary key
);

---- create above / drop below ----

drop table t1;
7
migrate/testdata/sample/002_create_t2.sql
vendored
Normal file
@ -0,0 +1,7 @@
create table t2(
  id serial primary key
);

---- create above / drop below ----

drop table t2;
1
migrate/testdata/sample/003_irreversible.sql
vendored
Normal file
@ -0,0 +1 @@
drop table t2;
5
migrate/testdata/sample/004_data_interpolation.sql
vendored
Normal file
@ -0,0 +1,5 @@
create table {{.prefix}}_bar(id serial primary key);

---- create above / drop below ----

drop table {{.prefix}}_bar;
5
migrate/testdata/sample/005_template_inclusion.sql
vendored
Normal file
@ -0,0 +1,5 @@
{{ template "shared/v1_001.sql" . }}

---- create above / drop below ----

drop view {{.prefix}}v1;
1
migrate/testdata/sample/shared/v1_001.sql
vendored
Normal file
@ -0,0 +1 @@
create view {{.prefix}}v1 as select * from t1;
1
migrate/testdata/sample/should_be_ignored.sql
vendored
Normal file
@ -0,0 +1 @@
-- This file should be ignored because it does not start with a number.
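The {{.prefix}} placeholders in these fixtures are plain Go text/template actions; evalMigration in migrate.go executes each migration body against m.Data. A minimal standalone sketch of the same interpolation, using the "prefix": "foo" data from the tests above:

	package main

	import (
		"bytes"
		"fmt"
		"text/template"
	)

	func main() {
		tmpl := template.Must(template.New("004_data_interpolation.sql up").
			Parse("create table {{.prefix}}_bar(id serial primary key);"))

		var buf bytes.Buffer
		if err := tmpl.Execute(&buf, map[string]interface{}{"prefix": "foo"}); err != nil {
			panic(err)
		}
		fmt.Println(buf.String()) // create table foo_bar(id serial primary key);
	}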
7
psql/bench.0
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8            50000    38531 ns/op    5175 B/op    148 allocs/op
BenchmarkCompileParallel-8   200000    11472 ns/op    5237 B/op    148 allocs/op
PASS
ok  github.com/dosco/super-graph/psql 4.686s
7
psql/bench.8
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompile-8           100000    15092 ns/op    3562 B/op    36 allocs/op
BenchmarkCompileParallel-8   300000     4684 ns/op    3592 B/op    36 allocs/op
PASS
ok  github.com/dosco/super-graph/psql 3.133s
@ -1,6 +0,0 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompileGQLToSQL-8    30000    38686 ns/op   15110 B/op   262 allocs/op
PASS
ok  github.com/dosco/super-graph/psql 1.637s
@ -1,16 +0,0 @@
?   github.com/dosco/super-graph [no test files]
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/psql
BenchmarkCompileGQLToSQL-8    30000    45507 ns/op   14565 B/op   244 allocs/op
PASS
ok  github.com/dosco/super-graph/psql 1.846s
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkParse-8   2000000000    0.00 ns/op
PASS
ok  github.com/dosco/super-graph/qcode 0.008s
PASS
ok  github.com/dosco/super-graph/serv 0.017s
?   github.com/dosco/super-graph/util [no test files]
197
psql/insert.go
Normal file
@ -0,0 +1,197 @@
package psql

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/dosco/super-graph/jsn"
	"github.com/dosco/super-graph/qcode"
)

var zeroPaging = qcode.Paging{}

func (co *Compiler) compileMutation(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
	if len(qc.Selects) == 0 {
		return 0, errors.New("empty query")
	}

	c := &compilerContext{w, qc.Selects, co}
	root := &qc.Selects[0]

	switch root.Action {
	case qcode.ActionInsert:
		if _, err := c.renderInsert(qc, w, vars); err != nil {
			return 0, err
		}

	case qcode.ActionUpdate:
		if _, err := c.renderUpdate(qc, w, vars); err != nil {
			return 0, err
		}

	case qcode.ActionDelete:
		if _, err := c.renderDelete(qc, w, vars); err != nil {
			return 0, err
		}

	default:
		return 0, errors.New("valid mutations are 'insert', 'update' and 'delete'")
	}

	// Reset the root select so the query compilation below only renders
	// the RETURNING selection over the mutation CTE.
	root.Paging = zeroPaging
	root.DistinctOn = root.DistinctOn[:0]
	root.OrderBy = root.OrderBy[:0]
	root.Where = nil
	root.Args = nil

	return c.compileQuery(qc, w)
}

func (c *compilerContext) renderInsert(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
	root := &qc.Selects[0]

	insert, ok := vars[root.ActionVar]
	if !ok {
		return 0, fmt.Errorf("Variable '%s' not defined", root.ActionVar)
	}

	ti, err := c.schema.GetTable(root.Table)
	if err != nil {
		return 0, err
	}

	jt, array, err := jsn.Tree(insert)
	if err != nil {
		return 0, err
	}

	c.w.WriteString(`WITH `)
	quoted(c.w, ti.Name)

	c.w.WriteString(` AS (WITH "input" AS (SELECT {{`)
	c.w.WriteString(root.ActionVar)
	c.w.WriteString(`}}::json AS j) INSERT INTO `)
	c.w.WriteString(ti.Name)
	io.WriteString(c.w, ` (`)
	c.renderInsertUpdateColumns(qc, w, jt, ti)
	io.WriteString(c.w, `)`)

	c.w.WriteString(` SELECT `)
	c.renderInsertUpdateColumns(qc, w, jt, ti)
	c.w.WriteString(` FROM input i, `)

	if array {
		c.w.WriteString(`json_populate_recordset`)
	} else {
		c.w.WriteString(`json_populate_record`)
	}

	c.w.WriteString(`(NULL::`)
	c.w.WriteString(ti.Name)
	c.w.WriteString(`, i.j) t RETURNING *) `)

	return 0, nil
}

func (c *compilerContext) renderInsertUpdateColumns(qc *qcode.QCode, w *bytes.Buffer,
	jt map[string]interface{}, ti *DBTableInfo) (uint32, error) {

	i := 0
	for _, cn := range ti.ColumnNames {
		if _, ok := jt[cn]; !ok {
			continue
		}
		if i != 0 {
			io.WriteString(c.w, `, `)
		}
		c.w.WriteString(cn)
		i++
	}

	return 0, nil
}

func (c *compilerContext) renderUpdate(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
	root := &qc.Selects[0]

	update, ok := vars[root.ActionVar]
	if !ok {
		return 0, fmt.Errorf("Variable '%s' not defined", root.ActionVar)
	}

	ti, err := c.schema.GetTable(root.Table)
	if err != nil {
		return 0, err
	}

	jt, array, err := jsn.Tree(update)
	if err != nil {
		return 0, err
	}

	c.w.WriteString(`WITH `)
	quoted(c.w, ti.Name)

	c.w.WriteString(` AS (WITH "input" AS (SELECT {{`)
	c.w.WriteString(root.ActionVar)
	c.w.WriteString(`}}::json AS j) UPDATE `)
	c.w.WriteString(ti.Name)
	io.WriteString(c.w, ` SET (`)
	c.renderInsertUpdateColumns(qc, w, jt, ti)

	c.w.WriteString(`) = (SELECT `)
	c.renderInsertUpdateColumns(qc, w, jt, ti)
	c.w.WriteString(` FROM input i, `)

	if array {
		c.w.WriteString(`json_populate_recordset`)
	} else {
		c.w.WriteString(`json_populate_record`)
	}

	c.w.WriteString(`(NULL::`)
	c.w.WriteString(ti.Name)
	c.w.WriteString(`, i.j) t)`)

	io.WriteString(c.w, ` WHERE `)

	if err := c.renderWhere(root, ti); err != nil {
		return 0, err
	}

	io.WriteString(c.w, ` RETURNING *) `)

	return 0, nil
}

func (c *compilerContext) renderDelete(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
	root := &qc.Selects[0]

	ti, err := c.schema.GetTable(root.Table)
	if err != nil {
		return 0, err
	}

	c.w.WriteString(`WITH `)
	quoted(c.w, ti.Name)

	c.w.WriteString(` AS (DELETE FROM `)
	c.w.WriteString(ti.Name)
	io.WriteString(c.w, ` WHERE `)

	if err := c.renderWhere(root, ti); err != nil {
		return 0, err
	}

	io.WriteString(c.w, ` RETURNING *) `)

	return 0, nil
}

func quoted(w *bytes.Buffer, identifier string) {
	w.WriteString(`"`)
	w.WriteString(identifier)
	w.WriteString(`"`)
}
133
psql/insert_test.go
Normal file
@ -0,0 +1,133 @@
package psql

import (
	"encoding/json"
	"testing"
)

func simpleInsert(t *testing.T) {
	gql := `mutation {
		user(insert: $data) {
			id
		}
	}`

	sql := `WITH "users" AS (WITH "input" AS (SELECT {{data}}::json AS j) INSERT INTO users (full_name, email) SELECT full_name, email FROM input i, json_populate_record(NULL::users, i.j) t RETURNING *) SELECT json_object_agg('user', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."id" AS "id") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."id" FROM "users") AS "users_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"data": json.RawMessage(`{"email": "reannagreenholt@orn.com", "full_name": "Flo Barton"}`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func singleInsert(t *testing.T) {
	gql := `mutation {
		product(id: 15, insert: $insert) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO products (name, description) SELECT name, description FROM input i, json_populate_record(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"insert": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func bulkInsert(t *testing.T) {
	gql := `mutation {
		product(id: 15, insert: $insert) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{insert}}::json AS j) INSERT INTO products (name, description) SELECT name, description FROM input i, json_populate_recordset(NULL::products, i.j) t RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"insert": json.RawMessage(` [{ "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }]`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func singleUpdate(t *testing.T) {
	gql := `mutation {
		product(id: 15, update: $update, where: { id: { eq: 1 } }) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (WITH "input" AS (SELECT {{update}}::json AS j) UPDATE products SET (name, description) = (SELECT name, description FROM input i, json_populate_record(NULL::products, i.j) t) WHERE (("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 1) AND (("products"."id") = 15) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func delete(t *testing.T) {
	gql := `mutation {
		product(delete: true, where: { id: { eq: 1 } }) {
			id
			name
		}
	}`

	sql := `WITH "products" AS (DELETE FROM products WHERE (("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 1) RETURNING *) SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products") AS "products_0") AS "done_1337";`

	vars := map[string]json.RawMessage{
		"update": json.RawMessage(` { "name": "my_name", "woo": { "hoo": "goo" }, "description": "my_desc" }`),
	}

	resSQL, err := compileGQLToPSQL(gql, vars)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func TestCompileInsert(t *testing.T) {
	t.Run("simpleInsert", simpleInsert)
	t.Run("singleInsert", singleInsert)
	t.Run("bulkInsert", bulkInsert)
	t.Run("singleUpdate", singleUpdate)
	t.Run("delete", delete)
}
@ -1,3 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -cpuprofile cpu.out
go test -bench=. -benchmem -cpuprofile cpu.out -run=XXX
go tool pprof -cum cpu.out
@ -1,3 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -memprofile mem.out
go test -bench=. -benchmem -memprofile mem.out -run=XXX
go tool pprof -cum mem.out
674
psql/psql.go
@ -1,674 +0,0 @@
package psql

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/dosco/super-graph/qcode"
	"github.com/dosco/super-graph/util"
)

type Config struct {
	Schema   *DBSchema
	Vars     map[string]string
	TableMap map[string]string
}

type Compiler struct {
	schema *DBSchema
	vars   map[string]string
	tmap   map[string]string
}

func NewCompiler(conf Config) *Compiler {
	return &Compiler{conf.Schema, conf.Vars, conf.TableMap}
}

func (c *Compiler) Compile(w io.Writer, qc *qcode.QCode) error {
	st := util.NewStack()
	ti, err := c.getTable(qc.Query.Select)
	if err != nil {
		return err
	}

	st.Push(&selectBlockClose{nil, qc.Query.Select})
	st.Push(&selectBlock{nil, qc.Query.Select, ti, c})

	fmt.Fprintf(w, `SELECT json_object_agg('%s', %s) FROM (`,
		qc.Query.Select.FieldName, qc.Query.Select.Table)

	for {
		if st.Len() == 0 {
			break
		}

		intf := st.Pop()

		switch v := intf.(type) {
		case *selectBlock:
			childCols, childIDs := c.relationshipColumns(v.sel)
			v.render(w, c.schema, childCols, childIDs)

			for i := range childIDs {
				sub := v.sel.Joins[childIDs[i]]

				ti, err := c.getTable(sub)
				if err != nil {
					return err
				}

				st.Push(&joinClose{sub})
				st.Push(&selectBlockClose{v.sel, sub})
				st.Push(&selectBlock{v.sel, sub, ti, c})
				st.Push(&joinOpen{sub})
			}
		case *selectBlockClose:
			v.render(w)

		case *joinOpen:
			v.render(w)

		case *joinClose:
			v.render(w)

		}
	}

	io.WriteString(w, `) AS "done_1337";`)

	return nil
}

func (c *Compiler) getTable(sel *qcode.Select) (*DBTableInfo, error) {
	if tn, ok := c.tmap[sel.Table]; ok {
		return c.schema.GetTable(tn)
	}
	return c.schema.GetTable(sel.Table)
}

func (c *Compiler) relationshipColumns(parent *qcode.Select) (
	cols []*qcode.Column, childIDs []int) {

	colmap := make(map[string]struct{}, len(parent.Cols))
	for i := range parent.Cols {
		colmap[parent.Cols[i].Name] = struct{}{}
	}

	for i, sub := range parent.Joins {
		k := TTKey{sub.Table, parent.Table}

		rel, ok := c.schema.RelMap[k]
		if !ok {
			continue
		}

		if rel.Type == RelBelongTo || rel.Type == RelOneToMany {
			if _, ok := colmap[rel.Col2]; !ok {
				cols = append(cols, &qcode.Column{parent.Table, rel.Col2, rel.Col2})
			}
			childIDs = append(childIDs, i)
		}

		if rel.Type == RelOneToManyThrough {
			if _, ok := colmap[rel.Col1]; !ok {
				cols = append(cols, &qcode.Column{parent.Table, rel.Col1, rel.Col1})
			}
			childIDs = append(childIDs, i)
		}
	}

	return cols, childIDs
}

type selectBlock struct {
	parent *qcode.Select
	sel    *qcode.Select
	ti     *DBTableInfo
	*Compiler
}

func (v *selectBlock) render(w io.Writer,
	schema *DBSchema, childCols []*qcode.Column, childIDs []int) error {

	hasOrder := len(v.sel.OrderBy) != 0

	// SELECT
	if v.sel.AsList {
		fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, v.sel.Table)

		if hasOrder {
			err := renderOrderBy(w, v.sel)
			if err != nil {
				return err
			}
		}

		fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, v.sel.Table)
	}

	// ROW-TO-JSON
	io.WriteString(w, `SELECT `)

	if len(v.sel.DistinctOn) != 0 {
		v.renderDistinctOn(w)
	}

	io.WriteString(w, `row_to_json((`)

	fmt.Fprintf(w, `SELECT "sel_%d" FROM (SELECT `, v.sel.ID)

	// Combined column names
	v.renderColumns(w)

	err := v.renderJoinedColumns(w, childIDs)
	if err != nil {
		return err
	}

	fmt.Fprintf(w, `) AS "sel_%d"`, v.sel.ID)

	fmt.Fprintf(w, `)) AS "%s"`, v.sel.Table)
	// END-ROW-TO-JSON

	if hasOrder {
		v.renderOrderByColumns(w)
	}
	// END-SELECT

	// FROM (SELECT .... )
	err = v.renderBaseSelect(w, schema, childCols, childIDs)
	if err != nil {
		return err
	}
	// END-FROM

	return nil
}

type selectBlockClose struct {
	parent *qcode.Select
	sel    *qcode.Select
}

func (v *selectBlockClose) render(w io.Writer) error {
	hasOrder := len(v.sel.OrderBy) != 0

	if hasOrder {
		err := renderOrderBy(w, v.sel)
		if err != nil {
			return err
		}
	}

	if len(v.sel.Paging.Limit) != 0 {
		fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, v.sel.Paging.Limit)
	} else {
		io.WriteString(w, ` LIMIT ('20') :: integer`)
	}

	if len(v.sel.Paging.Offset) != 0 {
		fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, v.sel.Paging.Offset)
	}

	if v.sel.AsList {
		fmt.Fprintf(w, `) AS "%s_%d"`, v.sel.Table, v.sel.ID)
	}

	return nil
}

type joinOpen struct {
	sel *qcode.Select
}

func (v joinOpen) render(w io.Writer) error {
	io.WriteString(w, ` LEFT OUTER JOIN LATERAL (`)
	return nil
}

type joinClose struct {
	sel *qcode.Select
}

func (v *joinClose) render(w io.Writer) error {
	fmt.Fprintf(w, `) AS "%s_%d.join" ON ('true')`, v.sel.Table, v.sel.ID)
	return nil
}

func (v *selectBlock) renderJoinTable(w io.Writer, schema *DBSchema, childIDs []int) {
	k := TTKey{v.sel.Table, v.parent.Table}
	rel, ok := schema.RelMap[k]
	if !ok {
		panic(errors.New("no relationship found"))
	}

	if rel.Type != RelOneToManyThrough {
		return
	}

	fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
		rel.Through, rel.Through, rel.ColT, v.parent.Table, v.parent.ID, rel.Col1)

}

func (v *selectBlock) renderColumns(w io.Writer) {
	for i, col := range v.sel.Cols {
		fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
			v.sel.Table, v.sel.ID, col.Name, col.FieldName)

		if i < len(v.sel.Cols)-1 {
			io.WriteString(w, ", ")
		}
	}
}

func (v *selectBlock) renderJoinedColumns(w io.Writer, childIDs []int) error {
	if len(v.sel.Cols) != 0 && len(childIDs) != 0 {
		io.WriteString(w, ", ")
	}

	for i := range childIDs {
		s := v.sel.Joins[childIDs[i]]

		fmt.Fprintf(w, `"%s_%d.join"."%s" AS "%s"`,
			s.Table, s.ID, s.Table, s.FieldName)

		if i < len(childIDs)-1 {
			io.WriteString(w, ", ")
		}
	}

	return nil
}

func (v *selectBlock) renderBaseSelect(w io.Writer, schema *DBSchema, childCols []*qcode.Column, childIDs []int) error {
	var groupBy []int

	isRoot := v.parent == nil
	isFil := v.sel.Where != nil
	isSearch := v.sel.Args["search"] != nil
	isAgg := false

	io.WriteString(w, " FROM (SELECT ")

	for i, col := range v.sel.Cols {
		cn := col.Name

		_, isRealCol := v.ti.Columns[cn]

		if !isRealCol {
			if isSearch {
				switch {
				case cn == "search_rank":
					cn = v.ti.TSVCol
					arg := v.sel.Args["search"]

					fmt.Fprintf(w, `ts_rank("%s"."%s", to_tsquery('%s')) AS %s`,
						v.sel.Table, cn, arg.Val, col.Name)

				case strings.HasPrefix(cn, "search_headline_"):
					cn = cn[16:]
					arg := v.sel.Args["search"]

					fmt.Fprintf(w, `ts_headline("%s"."%s", to_tsquery('%s')) AS %s`,
						v.sel.Table, cn, arg.Val, col.Name)
				}
			} else {
				pl := funcPrefixLen(cn)
				if pl == 0 {
					fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
				} else {
					isAgg = true
					fn := cn[0 : pl-1]
					cn := cn[pl:]
					fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, v.sel.Table, cn, col.Name)
				}
			}
		} else {
			groupBy = append(groupBy, i)
			fmt.Fprintf(w, `"%s"."%s"`, v.sel.Table, cn)
		}

		if i < len(v.sel.Cols)-1 || len(childCols) != 0 {
			io.WriteString(w, ", ")
		}
	}

	for i, col := range childCols {
		fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)

		if i < len(childCols)-1 {
			io.WriteString(w, ", ")
		}
	}

	if tn, ok := v.tmap[v.sel.Table]; ok {
		fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, v.sel.Table)
	} else {
		fmt.Fprintf(w, ` FROM "%s"`, v.sel.Table)
	}

	if isRoot && isFil {
		io.WriteString(w, ` WHERE (`)
		if err := v.renderWhere(w); err != nil {
			return err
		}
		io.WriteString(w, `)`)
	}

	if !isRoot {
		v.renderJoinTable(w, schema, childIDs)

		io.WriteString(w, ` WHERE (`)
		v.renderRelationship(w, schema)

		if isFil {
			io.WriteString(w, ` AND `)
			if err := v.renderWhere(w); err != nil {
				return err
			}
		}
		io.WriteString(w, `)`)
	}

	if isAgg {
		if len(groupBy) != 0 {
			fmt.Fprintf(w, ` GROUP BY `)

			for i, id := range groupBy {
				fmt.Fprintf(w, `"%s"."%s"`, v.sel.Table, v.sel.Cols[id].Name)

				if i < len(groupBy)-1 {
					io.WriteString(w, ", ")
				}
			}
		}
	}

	if len(v.sel.Paging.Limit) != 0 {
		fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, v.sel.Paging.Limit)
	} else {
		io.WriteString(w, ` LIMIT ('20') :: integer`)
|
||||
}
|
||||
|
||||
if len(v.sel.Paging.Offset) != 0 {
|
||||
fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, v.sel.Paging.Offset)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, `) AS "%s_%d"`, v.sel.Table, v.sel.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *selectBlock) renderOrderByColumns(w io.Writer) {
|
||||
if len(v.sel.Cols) != 0 {
|
||||
io.WriteString(w, ", ")
|
||||
}
|
||||
|
||||
for i := range v.sel.OrderBy {
|
||||
c := v.sel.OrderBy[i].Col
|
||||
fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d.ob.%s"`,
|
||||
v.sel.Table, v.sel.ID, c,
|
||||
v.sel.Table, v.sel.ID, c)
|
||||
|
||||
if i < len(v.sel.OrderBy)-1 {
|
||||
io.WriteString(w, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *selectBlock) renderRelationship(w io.Writer, schema *DBSchema) {
|
||||
k := TTKey{v.sel.Table, v.parent.Table}
|
||||
rel, ok := schema.RelMap[k]
|
||||
if !ok {
|
||||
panic(errors.New("no relationship found"))
|
||||
}
|
||||
|
||||
switch rel.Type {
|
||||
case RelBelongTo:
|
||||
fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
|
||||
v.sel.Table, rel.Col1, v.parent.Table, v.parent.ID, rel.Col2)
|
||||
|
||||
case RelOneToMany:
|
||||
fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
|
||||
v.sel.Table, rel.Col1, v.parent.Table, v.parent.ID, rel.Col2)
|
||||
|
||||
case RelOneToManyThrough:
|
||||
fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
|
||||
v.sel.Table, rel.Col1, rel.Through, rel.Col2)
|
||||
}
|
||||
}
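// Predicates generated above, as seen in the test expectations:
//
//   belongs-to:  (("users"."id") = ("products_0"."user_id"))
//   one-to-many: (("products"."user_id") = ("users_0"."id"))
//   through:     (("customers"."id") = ("purchases"."customer_id"))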
|
||||
|
||||
func (v *selectBlock) renderWhere(w io.Writer) error {
|
||||
st := util.NewStack()
|
||||
|
||||
if v.sel.Where != nil {
|
||||
st.Push(v.sel.Where)
|
||||
}
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
intf := st.Pop()
|
||||
|
||||
switch val := intf.(type) {
|
||||
case qcode.ExpOp:
|
||||
switch val {
|
||||
case qcode.OpAnd:
|
||||
io.WriteString(w, ` AND `)
|
||||
case qcode.OpOr:
|
||||
io.WriteString(w, ` OR `)
|
||||
case qcode.OpNot:
|
||||
io.WriteString(w, `NOT `)
|
||||
default:
|
||||
return fmt.Errorf("[Where] unexpected value encountered %v", intf)
|
||||
}
|
||||
case *qcode.Exp:
|
||||
switch val.Op {
|
||||
case qcode.OpAnd, qcode.OpOr:
|
||||
for i := len(val.Children) - 1; i >= 0; i-- {
|
||||
st.Push(val.Children[i])
|
||||
if i > 0 {
|
||||
st.Push(val.Op)
|
||||
}
|
||||
}
|
||||
continue
|
||||
case qcode.OpNot:
|
||||
st.Push(val.Children[0])
|
||||
st.Push(qcode.OpNot)
|
||||
continue
|
||||
}
|
||||
|
||||
if val.NestedCol {
|
||||
fmt.Fprintf(w, `(("%s") `, val.Col)
|
||||
} else if len(val.Col) != 0 {
|
||||
fmt.Fprintf(w, `(("%s"."%s") `, v.sel.Table, val.Col)
|
||||
}
|
||||
valExists := true
|
||||
|
||||
switch val.Op {
|
||||
case qcode.OpEquals:
|
||||
io.WriteString(w, `=`)
|
||||
case qcode.OpNotEquals:
|
||||
io.WriteString(w, `!=`)
|
||||
case qcode.OpGreaterOrEquals:
|
||||
io.WriteString(w, `>=`)
|
||||
case qcode.OpLesserOrEquals:
|
||||
io.WriteString(w, `<=`)
|
||||
case qcode.OpGreaterThan:
|
||||
io.WriteString(w, `>`)
|
||||
case qcode.OpLesserThan:
|
||||
io.WriteString(w, `<`)
|
||||
case qcode.OpIn:
|
||||
io.WriteString(w, `IN`)
|
||||
case qcode.OpNotIn:
|
||||
io.WriteString(w, `NOT IN`)
|
||||
case qcode.OpLike:
|
||||
io.WriteString(w, `LIKE`)
|
||||
case qcode.OpNotLike:
|
||||
io.WriteString(w, `NOT LIKE`)
|
||||
case qcode.OpILike:
|
||||
io.WriteString(w, `ILIKE`)
|
||||
case qcode.OpNotILike:
|
||||
io.WriteString(w, `NOT ILIKE`)
|
||||
case qcode.OpSimilar:
|
||||
io.WriteString(w, `SIMILAR TO`)
|
||||
case qcode.OpNotSimilar:
|
||||
io.WriteString(w, `NOT SIMILAR TO`)
|
||||
case qcode.OpContains:
|
||||
io.WriteString(w, `@>`)
|
||||
case qcode.OpContainedIn:
|
||||
io.WriteString(w, `<@`)
|
||||
case qcode.OpHasKey:
|
||||
io.WriteString(w, `?`)
|
||||
case qcode.OpHasKeyAny:
|
||||
io.WriteString(w, `?|`)
|
||||
case qcode.OpHasKeyAll:
|
||||
io.WriteString(w, `?&`)
|
||||
case qcode.OpIsNull:
|
||||
if strings.EqualFold(val.Val, "true") {
|
||||
io.WriteString(w, `IS NULL)`)
|
||||
} else {
|
||||
io.WriteString(w, `IS NOT NULL)`)
|
||||
}
|
||||
valExists = false
|
||||
case qcode.OpEqID:
|
||||
if len(v.ti.PrimaryCol) == 0 {
|
||||
return fmt.Errorf("no primary key column defined for %s", v.sel.Table)
|
||||
}
|
||||
fmt.Fprintf(w, `(("%s") = ('%s'))`, v.ti.PrimaryCol, val.Val)
|
||||
valExists = false
|
||||
case qcode.OpTsQuery:
|
||||
if len(v.ti.TSVCol) == 0 {
|
||||
return fmt.Errorf("no tsv column defined for %s", v.sel.Table)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, `(("%s") @@ to_tsquery('%s'))`, v.ti.TSVCol, val.Val)
|
||||
valExists = false
|
||||
|
||||
default:
|
||||
return fmt.Errorf("[Where] unexpected op code %d", val.Op)
|
||||
}
|
||||
|
||||
if valExists {
|
||||
if val.Type == qcode.ValList {
|
||||
renderList(w, val)
|
||||
} else {
|
||||
renderVal(w, val, v.vars)
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("[Where] unexpected value encountered %v", intf)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
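// renderWhere flattens the expression tree with an explicit stack instead of
// recursion: AND/OR nodes re-push their children interleaved with the
// operator token, OpNot pushes its single child followed by a NOT marker,
// and each leaf renders as ((column) operator (value)).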
|
||||
|
||||
func renderOrderBy(w io.Writer, sel *qcode.Select) error {
|
||||
io.WriteString(w, ` ORDER BY `)
|
||||
for i := range sel.OrderBy {
|
||||
ob := sel.OrderBy[i]
|
||||
|
||||
switch ob.Order {
|
||||
case qcode.OrderAsc:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Table, sel.ID, ob.Col)
|
||||
case qcode.OrderDesc:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Table, sel.ID, ob.Col)
|
||||
case qcode.OrderAscNullsFirst:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
|
||||
case qcode.OrderDescNullsFirst:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s" DESC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
|
||||
case qcode.OrderAscNullsLast:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Table, sel.ID, ob.Col)
|
||||
case qcode.OrderDescNullsLast:
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s" DESC NULLS LAST`, sel.Table, sel.ID, ob.Col)
|
||||
default:
|
||||
return fmt.Errorf("[qcode.Order By] unexpected value encountered %v", ob.Order)
|
||||
}
|
||||
if i < len(sel.OrderBy)-1 {
|
||||
io.WriteString(w, ", ")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v selectBlock) renderDistinctOn(w io.Writer) {
|
||||
io.WriteString(w, ` DISTINCT ON (`)
|
||||
for i := range v.sel.DistinctOn {
|
||||
fmt.Fprintf(w, `"%s_%d.ob.%s"`,
|
||||
v.sel.Table, v.sel.ID, v.sel.DistinctOn[i])
|
||||
|
||||
if i < len(v.sel.DistinctOn)-1 {
|
||||
io.WriteString(w, ", ")
|
||||
}
|
||||
}
|
||||
io.WriteString(w, `) `)
|
||||
}
|
||||
|
||||
func renderList(w io.Writer, ex *qcode.Exp) {
|
||||
io.WriteString(w, ` (`)
|
||||
for i := range ex.ListVal {
|
||||
switch ex.ListType {
|
||||
case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
|
||||
io.WriteString(w, ex.ListVal[i])
|
||||
case qcode.ValStr:
|
||||
fmt.Fprintf(w, `'%s'`, ex.ListVal[i])
|
||||
}
|
||||
|
||||
if i < len(ex.ListVal)-1 {
|
||||
io.WriteString(w, ", ")
|
||||
}
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
}
|
||||
|
||||
func renderVal(w io.Writer, ex *qcode.Exp, vars map[string]string) {
|
||||
io.WriteString(w, ` (`)
|
||||
switch ex.Type {
|
||||
case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
|
||||
io.WriteString(w, ex.Val)
|
||||
case qcode.ValStr:
|
||||
fmt.Fprintf(w, `'%s'`, ex.Val)
|
||||
case qcode.ValVar:
|
||||
if val, ok := vars[ex.Val]; ok {
|
||||
io.WriteString(w, val)
|
||||
} else {
|
||||
fmt.Fprintf(w, `'{{%s}}'`, ex.Val)
|
||||
}
|
||||
}
|
||||
io.WriteString(w, `)`)
|
||||
}
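// Known variables resolve through the vars map at compile time; anything
// else renders as a '{{name}}' placeholder (e.g. '{{user_id}}' in the test
// SQL), left in place for later substitution.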
|
||||
|
||||
func funcPrefixLen(fn string) int {
|
||||
switch {
|
||||
case strings.HasPrefix(fn, "avg_"):
|
||||
return 4
|
||||
case strings.HasPrefix(fn, "count_"):
|
||||
return 6
|
||||
case strings.HasPrefix(fn, "max_"):
|
||||
return 4
|
||||
case strings.HasPrefix(fn, "min_"):
|
||||
return 4
|
||||
case strings.HasPrefix(fn, "sum_"):
|
||||
return 4
|
||||
case strings.HasPrefix(fn, "stddev_"):
|
||||
return 7
|
||||
case strings.HasPrefix(fn, "stddev_pop_"):
|
||||
return 11
|
||||
case strings.HasPrefix(fn, "stddev_samp_"):
|
||||
return 12
|
||||
case strings.HasPrefix(fn, "variance_"):
|
||||
return 9
|
||||
case strings.HasPrefix(fn, "var_pop_"):
|
||||
return 8
|
||||
case strings.HasPrefix(fn, "var_samp_"):
|
||||
return 9
|
||||
}
|
||||
return 0
|
||||
}
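// Example: funcPrefixLen("max_price") returns 4, splitting the synthetic
// column into the aggregate "max" and the real column "price", which
// renderBaseSelect emits as max("products"."price") AS max_price.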
|
@ -1,496 +0,0 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dosco/super-graph/qcode"
|
||||
)
|
||||
|
||||
const (
|
||||
errNotExpected = "Generated SQL did not match what was expected"
|
||||
)
|
||||
|
||||
var (
|
||||
qcompile *qcode.Compiler
|
||||
pcompile *Compiler
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var err error
|
||||
|
||||
qcompile, err = qcode.NewCompiler(qcode.Config{
|
||||
Filter: []string{
|
||||
`{ user_id: { _eq: $user_id } }`,
|
||||
},
|
||||
FilterMap: map[string][]string{
|
||||
"users": []string{
|
||||
"{ id: { eq: $user_id } }",
|
||||
},
|
||||
"products": []string{
|
||||
"{ price: { gt: 0 } }",
|
||||
"{ price: { lt: 8 } }",
|
||||
},
|
||||
"customers": []string{},
|
||||
"mes": []string{
|
||||
"{ id: { eq: $user_id } }",
|
||||
},
|
||||
},
|
||||
Blacklist: []string{
|
||||
"secret",
|
||||
"password",
|
||||
"token",
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tables := []*DBTable{
|
||||
&DBTable{Name: "customers", Type: "table"},
|
||||
&DBTable{Name: "users", Type: "table"},
|
||||
&DBTable{Name: "products", Type: "table"},
|
||||
&DBTable{Name: "purchases", Type: "table"},
|
||||
}
|
||||
|
||||
columns := [][]*DBColumn{
|
||||
[]*DBColumn{
|
||||
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)}},
|
||||
[]*DBColumn{
|
||||
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)}},
|
||||
[]*DBColumn{
|
||||
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "users", FKeyColID: []int{1}},
|
||||
&DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)}},
|
||||
[]*DBColumn{
|
||||
&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "customers", FKeyColID: []int{1}},
|
||||
&DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "products", FKeyColID: []int{1}},
|
||||
&DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)},
|
||||
&DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, Uniquekey: false, FKeyTable: "", FKeyColID: []int(nil)}},
|
||||
}
|
||||
|
||||
schema := &DBSchema{
|
||||
Tables: make(map[string]*DBTableInfo),
|
||||
RelMap: make(map[TTKey]*DBRel),
|
||||
}
|
||||
|
||||
for i, t := range tables {
|
||||
schema.updateSchema(t, columns[i])
|
||||
}
|
||||
|
||||
vars := NewVariables(map[string]string{
|
||||
"account_id": "select account_id from users where id = $user_id",
|
||||
})
|
||||
|
||||
pcompile = NewCompiler(Config{
|
||||
Schema: schema,
|
||||
Vars: vars,
|
||||
TableMap: map[string]string{
|
||||
"mes": "users",
|
||||
},
|
||||
})
|
||||
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func compileGQLToPSQL(gql string) (string, error) {
|
||||
qc, err := qcompile.CompileQuery(gql)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var sqlStmt strings.Builder
|
||||
|
||||
if err := pcompile.Compile(&sqlStmt, qc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return sqlStmt.String(), nil
|
||||
}
|
||||
|
||||
func withComplexArgs(t *testing.T) {
|
||||
gql := `query {
|
||||
products(
|
||||
# returns only 30 items
|
||||
limit: 30,
|
||||
|
||||
# starts from item 10, commented out for now
|
||||
# offset: 10,
|
||||
|
||||
# orders the response items by highest price
|
||||
order_by: { price: desc },
|
||||
|
||||
# no duplicate prices returned
|
||||
distinct: [ price ]
|
||||
|
||||
# only items with an id >= 20 and < 28 are returned
|
||||
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products" ORDER BY "products_0.ob.price" DESC), '[]') AS "products" FROM (SELECT DISTINCT ON ("products_0.ob.price") row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "products", "products_0"."price" AS "products_0.ob.price" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("products"."id") < (28)) AND (("products"."id") >= (20))) LIMIT ('30') :: integer) AS "products_0" ORDER BY "products_0.ob.price" DESC LIMIT ('30') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func withWhereMultiOr(t *testing.T) {
|
||||
gql := `query {
|
||||
products(
|
||||
where: {
|
||||
or: {
|
||||
not: { id: { is_null: true } },
|
||||
price: { gt: 10 },
|
||||
price: { lt: 20 }
|
||||
} }
|
||||
) {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "products" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("products"."price") < (20)) OR (("products"."price") > (10)) OR NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func withWhereIsNull(t *testing.T) {
|
||||
gql := `query {
|
||||
products(
|
||||
where: {
|
||||
and: {
|
||||
not: { id: { is_null: true } },
|
||||
price: { gt: 10 }
|
||||
}}) {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "products" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("products"."price") > (10)) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func withWhereAndList(t *testing.T) {
|
||||
gql := `query {
|
||||
products(
|
||||
where: {
|
||||
and: [
|
||||
{ not: { id: { is_null: true } } },
|
||||
{ price: { gt: 10 } },
|
||||
] } ) {
|
||||
id
|
||||
name
|
||||
price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "products" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("products"."price") > (10)) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func oneToMany(t *testing.T) {
|
||||
gql := `query {
|
||||
users {
|
||||
email
|
||||
products {
|
||||
name
|
||||
price
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("users"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email", "products_1.join"."products" AS "products") AS "sel_0")) AS "users" FROM (SELECT "users"."email", "users"."id" FROM "users" WHERE ((("users"."id") = ('{{user_id}}'))) LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price") AS "sel_1")) AS "products" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "products_1") AS "products_1.join" ON ('true') LIMIT ('20') :: integer) AS "users_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func belongsTo(t *testing.T) {
|
||||
gql := `query {
|
||||
products {
|
||||
name
|
||||
price
|
||||
users {
|
||||
email
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "users_1.join"."users" AS "users") AS "sel_0")) AS "products" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("users"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "users_1"."email" AS "email") AS "sel_1")) AS "users" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1" LIMIT ('20') :: integer) AS "users_1") AS "users_1.join" ON ('true') LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func manyToMany(t *testing.T) {
|
||||
gql := `query {
|
||||
products {
|
||||
name
|
||||
customers {
|
||||
email
|
||||
full_name
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "customers_1.join"."customers" AS "customers") AS "sel_0")) AS "products" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8))) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("customers"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name") AS "sel_1")) AS "customers" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1" LIMIT ('20') :: integer) AS "customers_1") AS "customers_1.join" ON ('true') LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func manyToManyReverse(t *testing.T) {
|
||||
gql := `query {
|
||||
customers {
|
||||
email
|
||||
full_name
|
||||
products {
|
||||
name
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('customers', customers) FROM (SELECT coalesce(json_agg("customers"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "products_1.join"."products" AS "products") AS "sel_0")) AS "customers" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name") AS "sel_1")) AS "products" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "products_1") AS "products_1.join" ON ('true') LIMIT ('20') :: integer) AS "customers_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func fetchByID(t *testing.T) {
|
||||
gql := `query {
|
||||
product(id: 15) {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('product', products) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "products" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("id") = ('15'))) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func searchQuery(t *testing.T) {
|
||||
gql := `query {
|
||||
products(search: "Imperial") {
|
||||
id
|
||||
name
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "products" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("tsv") @@ to_tsquery('Imperial'))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func aggFunction(t *testing.T) {
|
||||
gql := `query {
|
||||
products {
|
||||
name
|
||||
count_price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price") AS "sel_0")) AS "products" FROM (SELECT "products"."name", count("products"."price") AS count_price FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8))) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func aggFunctionWithFilter(t *testing.T) {
|
||||
gql := `query {
|
||||
products(where: { id: { gt: 10 } }) {
|
||||
id
|
||||
max_price
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("products"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price") AS "sel_0")) AS "products" FROM (SELECT "products"."id", max("products"."price") AS max_price FROM "products" WHERE ((("products"."price") > (0)) AND (("products"."price") < (8)) AND (("products"."id") > (10))) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "products_0") AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func syntheticTables(t *testing.T) {
|
||||
gql := `query {
|
||||
me {
|
||||
email
|
||||
}
|
||||
}`
|
||||
|
||||
sql := `SELECT json_object_agg('me', mes) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "mes_0"."email" AS "email") AS "sel_0")) AS "mes" FROM (SELECT "mes"."email" FROM "users" AS "mes" WHERE ((("mes"."id") = ('{{user_id}}'))) LIMIT ('1') :: integer) AS "mes_0" LIMIT ('1') :: integer) AS "done_1337";`
|
||||
|
||||
resSQL, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resSQL != sql {
|
||||
t.Fatal(errNotExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompileGQL(t *testing.T) {
|
||||
t.Run("withComplexArgs", withComplexArgs)
|
||||
t.Run("withWhereAndList", withWhereAndList)
|
||||
t.Run("withWhereIsNull", withWhereIsNull)
|
||||
t.Run("withWhereMultiOr", withWhereMultiOr)
|
||||
t.Run("fetchByID", fetchByID)
|
||||
t.Run("searchQuery", searchQuery)
|
||||
t.Run("belongsTo", belongsTo)
|
||||
t.Run("oneToMany", oneToMany)
|
||||
t.Run("manyToMany", manyToMany)
|
||||
t.Run("manyToManyReverse", manyToManyReverse)
|
||||
t.Run("aggFunction", aggFunction)
|
||||
t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
|
||||
t.Run("syntheticTables", syntheticTables)
|
||||
}
|
||||
|
||||
func BenchmarkCompileGQLToSQL(b *testing.B) {
|
||||
gql := `query {
|
||||
products(
|
||||
# returns only 30 items
|
||||
limit: 30,
|
||||
|
||||
# starts from item 10, commented out for now
|
||||
# offset: 10,
|
||||
|
||||
# orders the response items by highest price
|
||||
order_by: { price: desc },
|
||||
|
||||
# only items with an id >= 20 and < 28 are returned
|
||||
where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
|
||||
id
|
||||
name
|
||||
price
|
||||
user {
|
||||
full_name
|
||||
picture : avatar
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
_, err := compileGQLToPSQL(gql)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
psql/select.go (new file, 1066 lines)
@ -0,0 +1,1066 @@
|
||||
package psql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"github.com/dosco/super-graph/qcode"
|
||||
"github.com/dosco/super-graph/util"
|
||||
)
|
||||
|
||||
const (
|
||||
empty = ""
|
||||
closeBlock = 500
|
||||
)
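// closeBlock offsets select IDs on the traversal stack: an ID below
// closeBlock opens a select block, an ID at or above it closes the matching
// block (see compileQuery below), so nesting is tracked with plain integers.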
|
||||
|
||||
type Variables map[string]json.RawMessage
|
||||
|
||||
type Config struct {
|
||||
Schema *DBSchema
|
||||
Vars map[string]string
|
||||
}
|
||||
|
||||
type Compiler struct {
|
||||
schema *DBSchema
|
||||
vars map[string]string
|
||||
}
|
||||
|
||||
func NewCompiler(conf Config) *Compiler {
|
||||
return &Compiler{conf.Schema, conf.Vars}
|
||||
}
|
||||
|
||||
func (c *Compiler) AddRelationship(child, parent string, rel *DBRel) error {
|
||||
return c.schema.SetRel(child, parent, rel)
|
||||
}
|
||||
|
||||
func (c *Compiler) IDColumn(table string) (string, error) {
|
||||
t, err := c.schema.GetTable(table)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
|
||||
return t.PrimaryCol, nil
|
||||
}
|
||||
|
||||
type compilerContext struct {
|
||||
w *bytes.Buffer
|
||||
s []qcode.Select
|
||||
*Compiler
|
||||
}
|
||||
|
||||
func (co *Compiler) CompileEx(qc *qcode.QCode, vars Variables) (uint32, []byte, error) {
|
||||
w := &bytes.Buffer{}
|
||||
skipped, err := co.Compile(qc, w, vars)
|
||||
return skipped, w.Bytes(), err
|
||||
}
|
||||
|
||||
func (co *Compiler) Compile(qc *qcode.QCode, w *bytes.Buffer, vars Variables) (uint32, error) {
|
||||
switch qc.Type {
|
||||
case qcode.QTQuery:
|
||||
return co.compileQuery(qc, w)
|
||||
case qcode.QTMutation:
|
||||
return co.compileMutation(qc, w, vars)
|
||||
}
|
||||
|
||||
return 0, errors.New("unknown operation")
|
||||
}
|
||||
|
||||
func (co *Compiler) compileQuery(qc *qcode.QCode, w *bytes.Buffer) (uint32, error) {
|
||||
if len(qc.Selects) == 0 {
|
||||
return 0, errors.New("empty query")
|
||||
}
|
||||
|
||||
c := &compilerContext{w, qc.Selects, co}
|
||||
root := &qc.Selects[0]
|
||||
|
||||
ti, err := c.schema.GetTable(root.Table)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
st := NewStack()
|
||||
st.Push(root.ID + closeBlock)
|
||||
st.Push(root.ID)
|
||||
|
||||
//fmt.Fprintf(w, `SELECT json_object_agg('%s', %s) FROM (`,
|
||||
//root.FieldName, root.Table)
|
||||
c.w.WriteString(`SELECT json_object_agg('`)
|
||||
c.w.WriteString(root.FieldName)
|
||||
c.w.WriteString(`', `)
|
||||
|
||||
if !ti.Singular {
|
||||
c.w.WriteString(root.Table)
|
||||
} else {
|
||||
c.w.WriteString("sel_json_")
|
||||
int2string(c.w, root.ID)
|
||||
}
|
||||
c.w.WriteString(`) FROM (`)
|
||||
|
||||
var ignored uint32
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
id := st.Pop()
|
||||
|
||||
if id < closeBlock {
|
||||
sel := &c.s[id]
|
||||
|
||||
ti, err := c.schema.GetTable(sel.Table)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if sel.ID != 0 {
|
||||
if err = c.renderJoin(sel); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
skipped, err := c.renderSelect(sel, ti)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ignored |= skipped
|
||||
|
||||
for _, cid := range sel.Children {
|
||||
if hasBit(skipped, uint32(cid)) {
|
||||
continue
|
||||
}
|
||||
child := &c.s[cid]
|
||||
|
||||
st.Push(child.ID + closeBlock)
|
||||
st.Push(child.ID)
|
||||
}
|
||||
|
||||
} else {
|
||||
sel := &c.s[(id - closeBlock)]
|
||||
|
||||
ti, err := c.schema.GetTable(sel.Table)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = c.renderSelectClose(sel, ti)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if sel.ID != 0 {
|
||||
if err = c.renderJoinClose(sel); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.w.WriteString(`)`)
|
||||
alias(c.w, `done_1337`)
|
||||
c.w.WriteString(`;`)
|
||||
|
||||
return ignored, nil
|
||||
}
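// Example walk for a root select (ID 0) with one child (ID 1):
//
//   push 500, 0        pop 0:   render root select, push 501, 1
//   pop 1:             render child join + select
//   pop 501, pop 500:  close the child block, then the root block
//
// which produces the innermost-first nesting of the final statement.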
|
||||
|
||||
func (c *compilerContext) processChildren(sel *qcode.Select, ti *DBTableInfo) (uint32, []*qcode.Column) {
|
||||
var skipped uint32
|
||||
|
||||
cols := make([]*qcode.Column, 0, len(sel.Cols))
|
||||
colmap := make(map[string]struct{}, len(sel.Cols))
|
||||
|
||||
for i := range sel.Cols {
|
||||
colmap[sel.Cols[i].Name] = struct{}{}
|
||||
}
|
||||
|
||||
for _, id := range sel.Children {
|
||||
child := &c.s[id]
|
||||
|
||||
rel, err := c.schema.GetRel(child.Table, ti.Name)
|
||||
if err != nil {
|
||||
skipped |= (1 << uint(id))
|
||||
continue
|
||||
}
|
||||
|
||||
switch rel.Type {
|
||||
case RelOneToMany:
|
||||
fallthrough
|
||||
case RelBelongTo:
|
||||
if _, ok := colmap[rel.Col2]; !ok {
|
||||
cols = append(cols, &qcode.Column{ti.Name, rel.Col2, rel.Col2})
|
||||
}
|
||||
case RelOneToManyThrough:
|
||||
if _, ok := colmap[rel.Col1]; !ok {
|
||||
cols = append(cols, &qcode.Column{ti.Name, rel.Col1, rel.Col1})
|
||||
}
|
||||
case RelRemote:
|
||||
if _, ok := colmap[rel.Col1]; !ok {
|
||||
cols = append(cols, &qcode.Column{ti.Name, rel.Col1, rel.Col2})
|
||||
}
|
||||
skipped |= (1 << uint(id))
|
||||
|
||||
default:
|
||||
skipped |= (1 << uint(id))
|
||||
}
|
||||
}
|
||||
|
||||
return skipped, cols
|
||||
}
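// skipped is a bitmask keyed by child select ID: remote relationships and
// children with no resolvable relationship are flagged so the renderer can
// leave them out; callers test membership with hasBit(skipped, uint32(id)).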
|
||||
|
||||
func (c *compilerContext) renderSelect(sel *qcode.Select, ti *DBTableInfo) (uint32, error) {
|
||||
skipped, childCols := c.processChildren(sel, ti)
|
||||
hasOrder := len(sel.OrderBy) != 0
|
||||
|
||||
// SELECT
|
||||
if !ti.Singular {
|
||||
//fmt.Fprintf(w, `SELECT coalesce(json_agg("%s"`, c.sel.Table)
|
||||
c.w.WriteString(`SELECT coalesce(json_agg("`)
|
||||
c.w.WriteString("sel_json_")
|
||||
int2string(c.w, sel.ID)
|
||||
c.w.WriteString(`"`)
|
||||
|
||||
if hasOrder {
|
||||
err := c.renderOrderBy(sel, ti)
|
||||
if err != nil {
|
||||
return skipped, err
|
||||
}
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `), '[]') AS "%s" FROM (`, c.sel.Table)
|
||||
c.w.WriteString(`), '[]')`)
|
||||
alias(c.w, sel.Table)
|
||||
c.w.WriteString(` FROM (`)
|
||||
}
|
||||
|
||||
// ROW-TO-JSON
|
||||
c.w.WriteString(`SELECT `)
|
||||
|
||||
if len(sel.DistinctOn) != 0 {
|
||||
c.renderDistinctOn(sel, ti)
|
||||
}
|
||||
|
||||
c.w.WriteString(`row_to_json((`)
|
||||
|
||||
//fmt.Fprintf(w, `SELECT "sel_%d" FROM (SELECT `, c.sel.ID)
|
||||
c.w.WriteString(`SELECT "sel_`)
|
||||
int2string(c.w, sel.ID)
|
||||
c.w.WriteString(`" FROM (SELECT `)
|
||||
|
||||
// Combined column names
|
||||
c.renderColumns(sel, ti)
|
||||
|
||||
c.renderRemoteRelColumns(sel, ti)
|
||||
|
||||
err := c.renderJoinedColumns(sel, ti, skipped)
|
||||
if err != nil {
|
||||
return skipped, err
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `) AS "sel_%d"`, c.sel.ID)
|
||||
c.w.WriteString(`)`)
|
||||
aliasWithID(c.w, "sel", sel.ID)
|
||||
|
||||
//fmt.Fprintf(w, `)) AS "%s"`, c.sel.Table)
|
||||
c.w.WriteString(`))`)
|
||||
aliasWithID(c.w, "sel_json", sel.ID)
|
||||
// END-ROW-TO-JSON
|
||||
|
||||
if hasOrder {
|
||||
c.renderOrderByColumns(sel, ti)
|
||||
}
|
||||
// END-SELECT
|
||||
|
||||
// FROM (SELECT .... )
|
||||
err = c.renderBaseSelect(sel, ti, childCols, skipped)
|
||||
if err != nil {
|
||||
return skipped, err
|
||||
}
|
||||
// END-FROM
|
||||
|
||||
return skipped, nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderSelectClose(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
hasOrder := len(sel.OrderBy) != 0
|
||||
|
||||
if hasOrder {
|
||||
err := c.renderOrderBy(sel, ti)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if sel.Action == 0 {
|
||||
if len(sel.Paging.Limit) != 0 {
|
||||
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
|
||||
c.w.WriteString(` LIMIT ('`)
|
||||
c.w.WriteString(sel.Paging.Limit)
|
||||
c.w.WriteString(`') :: integer`)
|
||||
|
||||
} else if ti.Singular {
|
||||
c.w.WriteString(` LIMIT ('1') :: integer`)
|
||||
|
||||
} else {
|
||||
c.w.WriteString(` LIMIT ('20') :: integer`)
|
||||
}
|
||||
}
|
||||
|
||||
if len(sel.Paging.Offset) != 0 {
|
||||
//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
|
||||
c.w.WriteString(` OFFSET ('`)
|
||||
c.w.WriteString(sel.Paging.Offset)
|
||||
c.w.WriteString(`') :: integer`)
|
||||
}
|
||||
|
||||
if !ti.Singular {
|
||||
//fmt.Fprintf(w, `) AS "sel_json_agg_%d"`, c.sel.ID)
|
||||
c.w.WriteString(`)`)
|
||||
aliasWithID(c.w, "sel_json_agg", sel.ID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoin(sel *qcode.Select) error {
|
||||
c.w.WriteString(` LEFT OUTER JOIN LATERAL (`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinClose(sel *qcode.Select) error {
|
||||
//fmt.Fprintf(w, `) AS "%s_%d_join" ON ('true')`, c.sel.Table, c.sel.ID)
|
||||
c.w.WriteString(`)`)
|
||||
aliasWithIDSuffix(c.w, sel.Table, sel.ID, "_join")
|
||||
c.w.WriteString(` ON ('true')`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinTable(sel *qcode.Select) error {
|
||||
parent := &c.s[sel.ParentID]
|
||||
|
||||
rel, err := c.schema.GetRel(sel.Table, parent.Table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rel.Type != RelOneToManyThrough {
|
||||
return err
|
||||
}
|
||||
|
||||
pt, err := c.schema.GetTable(parent.Table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, ` LEFT OUTER JOIN "%s" ON (("%s"."%s") = ("%s_%d"."%s"))`,
|
||||
//rel.Through, rel.Through, rel.ColT, c.parent.Table, c.parent.ID, rel.Col1)
|
||||
c.w.WriteString(` LEFT OUTER JOIN "`)
|
||||
c.w.WriteString(rel.Through)
|
||||
c.w.WriteString(`" ON ((`)
|
||||
colWithTable(c.w, rel.Through, rel.ColT)
|
||||
c.w.WriteString(`) = (`)
|
||||
colWithTableID(c.w, pt.Name, parent.ID, rel.Col1)
|
||||
c.w.WriteString(`))`)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderColumns(sel *qcode.Select, ti *DBTableInfo) {
|
||||
for i, col := range sel.Cols {
|
||||
if i != 0 {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
|
||||
//c.sel.Table, c.sel.ID, col.Name, col.FieldName)
|
||||
colWithTableIDAlias(c.w, ti.Name, sel.ID, col.Name, col.FieldName)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderRemoteRelColumns(sel *qcode.Select, ti *DBTableInfo) {
|
||||
i := 0
|
||||
|
||||
for _, id := range sel.Children {
|
||||
child := &c.s[id]
|
||||
|
||||
rel, err := c.schema.GetRel(child.Table, sel.Table)
|
||||
if err != nil || rel.Type != RelRemote {
|
||||
continue
|
||||
}
|
||||
if i != 0 || len(sel.Cols) != 0 {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s"`,
|
||||
//c.sel.Table, c.sel.ID, rel.Col1, rel.Col2)
|
||||
colWithTableID(c.w, ti.Name, sel.ID, rel.Col1)
|
||||
alias(c.w, rel.Col2)
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderJoinedColumns(sel *qcode.Select, ti *DBTableInfo, skipped uint32) error {
|
||||
colsRendered := len(sel.Cols) != 0
|
||||
|
||||
for _, id := range sel.Children {
|
||||
skipThis := hasBit(skipped, uint32(id))
|
||||
|
||||
if colsRendered && !skipThis {
|
||||
io.WriteString(c.w, ", ")
|
||||
}
|
||||
if skipThis {
|
||||
continue
|
||||
}
|
||||
childSel := &c.s[id]
|
||||
|
||||
//fmt.Fprintf(w, `"%s_%d_join"."%s" AS "%s"`,
|
||||
//s.Table, s.ID, s.Table, s.FieldName)
|
||||
colWithTableIDSuffixAlias(c.w, childSel.Table, childSel.ID,
|
||||
"_join", childSel.Table, childSel.FieldName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderBaseSelect(sel *qcode.Select, ti *DBTableInfo,
|
||||
childCols []*qcode.Column, skipped uint32) error {
|
||||
var groupBy []int
|
||||
|
||||
isRoot := sel.ID == 0
|
||||
isFil := sel.Where != nil
|
||||
isSearch := sel.Args["search"] != nil
|
||||
isAgg := false
|
||||
|
||||
c.w.WriteString(` FROM (SELECT `)
|
||||
|
||||
for i, col := range sel.Cols {
|
||||
cn := col.Name
|
||||
|
||||
_, isRealCol := ti.Columns[cn]
|
||||
|
||||
if !isRealCol {
|
||||
if isSearch {
|
||||
switch {
|
||||
case cn == "search_rank":
|
||||
cn = ti.TSVCol
|
||||
arg := sel.Args["search"]
|
||||
|
||||
//fmt.Fprintf(w, `ts_rank("%s"."%s", to_tsquery('%s')) AS %s`,
|
||||
//c.sel.Table, cn, arg.Val, col.Name)
|
||||
c.w.WriteString(`ts_rank(`)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
c.w.WriteString(`, to_tsquery('`)
|
||||
c.w.WriteString(arg.Val)
|
||||
c.w.WriteString(`')`)
|
||||
alias(c.w, col.Name)
|
||||
|
||||
case strings.HasPrefix(cn, "search_headline_"):
|
||||
cn = cn[16:]
|
||||
arg := sel.Args["search"]
|
||||
|
||||
//fmt.Fprintf(w, `ts_headline("%s"."%s", to_tsquery('%s')) AS %s`,
|
||||
//c.sel.Table, cn, arg.Val, col.Name)
|
||||
c.w.WriteString(`ts_headline(`)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
c.w.WriteString(`, to_tsquery('`)
|
||||
c.w.WriteString(arg.Val)
|
||||
c.w.WriteString(`')`)
|
||||
alias(c.w, col.Name)
|
||||
}
|
||||
} else {
|
||||
pl := funcPrefixLen(cn)
|
||||
if pl == 0 {
|
||||
//fmt.Fprintf(w, `'%s not defined' AS %s`, cn, col.Name)
|
||||
c.w.WriteString(`'`)
|
||||
c.w.WriteString(cn)
|
||||
c.w.WriteString(` not defined'`)
|
||||
alias(c.w, col.Name)
|
||||
} else {
|
||||
isAgg = true
|
||||
fn := cn[0 : pl-1]
|
||||
cn := cn[pl:]
|
||||
//fmt.Fprintf(w, `%s("%s"."%s") AS %s`, fn, c.sel.Table, cn, col.Name)
|
||||
c.w.WriteString(fn)
|
||||
c.w.WriteString(`(`)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
c.w.WriteString(`)`)
|
||||
alias(c.w, col.Name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
groupBy = append(groupBy, i)
|
||||
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, cn)
|
||||
colWithTable(c.w, ti.Name, cn)
|
||||
}
|
||||
|
||||
if i < len(sel.Cols)-1 || len(childCols) != 0 {
|
||||
//io.WriteString(w, ", ")
|
||||
c.w.WriteString(`, `)
|
||||
}
|
||||
}
|
||||
|
||||
for i, col := range childCols {
|
||||
if i != 0 {
|
||||
//io.WriteString(w, ", ")
|
||||
c.w.WriteString(`, `)
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `"%s"."%s"`, col.Table, col.Name)
|
||||
colWithTable(c.w, col.Table, col.Name)
|
||||
}
|
||||
|
||||
c.w.WriteString(` FROM `)
|
||||
|
||||
//fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
|
||||
c.w.WriteString(`"`)
|
||||
c.w.WriteString(ti.Name)
|
||||
c.w.WriteString(`"`)
|
||||
|
||||
// if tn, ok := c.tmap[sel.Table]; ok {
|
||||
// //fmt.Fprintf(w, ` FROM "%s" AS "%s"`, tn, c.sel.Table)
|
||||
// tableWithAlias(c.w, ti.Name, sel.Table)
|
||||
// } else {
|
||||
// //fmt.Fprintf(w, ` FROM "%s"`, c.sel.Table)
|
||||
// c.w.WriteString(`"`)
|
||||
// c.w.WriteString(sel.Table)
|
||||
// c.w.WriteString(`"`)
|
||||
// }
|
||||
|
||||
if isRoot && isFil {
|
||||
c.w.WriteString(` WHERE (`)
|
||||
if err := c.renderWhere(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
c.w.WriteString(`)`)
|
||||
}
|
||||
|
||||
if !isRoot {
|
||||
if err := c.renderJoinTable(sel); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.w.WriteString(` WHERE (`)
|
||||
|
||||
if err := c.renderRelationship(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isFil {
|
||||
c.w.WriteString(` AND `)
|
||||
if err := c.renderWhere(sel, ti); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.w.WriteString(`)`)
|
||||
}
|
||||
|
||||
if isAgg {
|
||||
if len(groupBy) != 0 {
|
||||
c.w.WriteString(` GROUP BY `)
|
||||
|
||||
for i, id := range groupBy {
|
||||
if i != 0 {
|
||||
c.w.WriteString(`, `)
|
||||
}
|
||||
//fmt.Fprintf(w, `"%s"."%s"`, c.sel.Table, c.sel.Cols[id].Name)
|
||||
colWithTable(c.w, ti.Name, sel.Cols[id].Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if sel.Action == 0 {
|
||||
if len(sel.Paging.Limit) != 0 {
|
||||
//fmt.Fprintf(w, ` LIMIT ('%s') :: integer`, c.sel.Paging.Limit)
|
||||
c.w.WriteString(` LIMIT ('`)
|
||||
c.w.WriteString(sel.Paging.Limit)
|
||||
c.w.WriteString(`') :: integer`)
|
||||
|
||||
} else if ti.Singular {
|
||||
c.w.WriteString(` LIMIT ('1') :: integer`)
|
||||
|
||||
} else {
|
||||
c.w.WriteString(` LIMIT ('20') :: integer`)
|
||||
}
|
||||
}
|
||||
|
||||
if len(sel.Paging.Offset) != 0 {
|
||||
//fmt.Fprintf(w, ` OFFSET ('%s') :: integer`, c.sel.Paging.Offset)
|
||||
c.w.WriteString(` OFFSET ('`)
|
||||
c.w.WriteString(sel.Paging.Offset)
|
||||
c.w.WriteString(`') :: integer`)
|
||||
}
|
||||
|
||||
//fmt.Fprintf(w, `) AS "%s_%d"`, c.sel.Table, c.sel.ID)
|
||||
c.w.WriteString(`)`)
|
||||
aliasWithID(c.w, ti.Name, sel.ID)
|
||||
return nil
|
||||
}
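// Paging defaults when no limit argument is present: singular selections get
// LIMIT ('1') :: integer, lists fall back to LIMIT ('20') :: integer, and an
// OFFSET is appended only when one was explicitly requested.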
|
||||
|
||||
func (c *compilerContext) renderOrderByColumns(sel *qcode.Select, ti *DBTableInfo) {
|
||||
colsRendered := len(sel.Cols) != 0
|
||||
|
||||
for i := range sel.OrderBy {
|
||||
if colsRendered {
|
||||
//io.WriteString(w, ", ")
|
||||
c.w.WriteString(`, `)
|
||||
}
|
||||
|
||||
col := sel.OrderBy[i].Col
|
||||
//fmt.Fprintf(w, `"%s_%d"."%s" AS "%s_%d_%s_ob"`,
|
||||
//c.sel.Table, c.sel.ID, c,
|
||||
//c.sel.Table, c.sel.ID, c)
|
||||
colWithTableID(c.w, ti.Name, sel.ID, col)
|
||||
c.w.WriteString(` AS `)
|
||||
tableIDColSuffix(c.w, sel.Table, sel.ID, col, "_ob")
|
||||
}
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderRelationship(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
parent := c.s[sel.ParentID]
|
||||
|
||||
rel, err := c.schema.GetRel(sel.Table, parent.Table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch rel.Type {
|
||||
case RelBelongTo:
|
||||
//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
|
||||
//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
|
||||
c.w.WriteString(`((`)
|
||||
colWithTable(c.w, ti.Name, rel.Col1)
|
||||
c.w.WriteString(`) = (`)
|
||||
colWithTableID(c.w, parent.Table, parent.ID, rel.Col2)
|
||||
c.w.WriteString(`))`)
|
||||
|
||||
case RelOneToMany:
|
||||
//fmt.Fprintf(w, `(("%s"."%s") = ("%s_%d"."%s"))`,
|
||||
//c.sel.Table, rel.Col1, c.parent.Table, c.parent.ID, rel.Col2)
|
||||
c.w.WriteString(`((`)
|
||||
colWithTable(c.w, ti.Name, rel.Col1)
|
||||
c.w.WriteString(`) = (`)
|
||||
colWithTableID(c.w, parent.Table, parent.ID, rel.Col2)
|
||||
c.w.WriteString(`))`)
|
||||
|
||||
case RelOneToManyThrough:
|
||||
//fmt.Fprintf(w, `(("%s"."%s") = ("%s"."%s"))`,
|
||||
//c.sel.Table, rel.Col1, rel.Through, rel.Col2)
|
||||
c.w.WriteString(`((`)
|
||||
colWithTable(c.w, ti.Name, rel.Col1)
|
||||
c.w.WriteString(`) = (`)
|
||||
colWithTable(c.w, rel.Through, rel.Col2)
|
||||
c.w.WriteString(`))`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderWhere(sel *qcode.Select, ti *DBTableInfo) error {
|
||||
st := util.NewStack()
|
||||
|
||||
if sel.Where != nil {
|
||||
st.Push(sel.Where)
|
||||
}
|
||||
|
||||
for {
|
||||
if st.Len() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
intf := st.Pop()
|
||||
|
||||
switch val := intf.(type) {
|
||||
case qcode.ExpOp:
|
||||
switch val {
|
||||
case qcode.OpAnd:
|
||||
c.w.WriteString(` AND `)
|
||||
case qcode.OpOr:
|
||||
c.w.WriteString(` OR `)
|
||||
case qcode.OpNot:
|
||||
c.w.WriteString(`NOT `)
|
||||
default:
|
||||
return fmt.Errorf("11: unexpected value %v (%t)", intf, intf)
|
||||
}
|
||||
|
||||
case *qcode.Exp:
|
||||
switch val.Op {
|
||||
case qcode.OpAnd, qcode.OpOr:
|
||||
for i := len(val.Children) - 1; i >= 0; i-- {
|
||||
st.Push(val.Children[i])
|
||||
if i > 0 {
|
||||
st.Push(val.Op)
|
||||
}
|
||||
}
|
||||
qcode.FreeExp(val)
|
||||
|
||||
case qcode.OpNot:
|
||||
st.Push(val.Children[0])
|
||||
st.Push(qcode.OpNot)
|
||||
qcode.FreeExp(val)
|
||||
|
||||
default:
|
||||
if val.NestedCol {
|
||||
//fmt.Fprintf(w, `(("%s") `, val.Col)
|
||||
c.w.WriteString(`(("`)
|
||||
c.w.WriteString(val.Col)
|
||||
c.w.WriteString(`") `)
|
||||
|
||||
} else if len(val.Col) != 0 {
|
||||
//fmt.Fprintf(w, `(("%s"."%s") `, c.sel.Table, val.Col)
|
||||
c.w.WriteString(`((`)
|
||||
colWithTable(c.w, ti.Name, val.Col)
|
||||
c.w.WriteString(`) `)
|
||||
}
|
||||
valExists := true
|
||||
|
||||
switch val.Op {
|
||||
case qcode.OpEquals:
|
||||
c.w.WriteString(`=`)
|
||||
case qcode.OpNotEquals:
|
||||
c.w.WriteString(`!=`)
|
||||
case qcode.OpGreaterOrEquals:
|
||||
c.w.WriteString(`>=`)
|
||||
case qcode.OpLesserOrEquals:
|
||||
c.w.WriteString(`<=`)
|
||||
case qcode.OpGreaterThan:
|
||||
c.w.WriteString(`>`)
|
||||
case qcode.OpLesserThan:
|
||||
c.w.WriteString(`<`)
|
||||
case qcode.OpIn:
|
||||
c.w.WriteString(`IN`)
|
||||
case qcode.OpNotIn:
|
||||
c.w.WriteString(`NOT IN`)
|
||||
case qcode.OpLike:
|
||||
c.w.WriteString(`LIKE`)
|
||||
case qcode.OpNotLike:
|
||||
c.w.WriteString(`NOT LIKE`)
|
||||
case qcode.OpILike:
|
||||
c.w.WriteString(`ILIKE`)
|
||||
case qcode.OpNotILike:
|
||||
c.w.WriteString(`NOT ILIKE`)
|
||||
case qcode.OpSimilar:
|
||||
c.w.WriteString(`SIMILAR TO`)
|
||||
case qcode.OpNotSimilar:
|
||||
c.w.WriteString(`NOT SIMILAR TO`)
|
||||
case qcode.OpContains:
|
||||
c.w.WriteString(`@>`)
|
||||
case qcode.OpContainedIn:
|
||||
c.w.WriteString(`<@`)
|
||||
case qcode.OpHasKey:
|
||||
c.w.WriteString(`?`)
|
||||
case qcode.OpHasKeyAny:
|
||||
c.w.WriteString(`?|`)
|
||||
case qcode.OpHasKeyAll:
|
||||
c.w.WriteString(`?&`)
|
||||
case qcode.OpIsNull:
|
||||
if strings.EqualFold(val.Val, "true") {
|
||||
c.w.WriteString(`IS NULL)`)
|
||||
} else {
|
||||
c.w.WriteString(`IS NOT NULL)`)
|
||||
}
|
||||
valExists = false
|
||||
case qcode.OpEqID:
|
||||
if len(ti.PrimaryCol) == 0 {
|
||||
return fmt.Errorf("no primary key column defined for %s", ti.Name)
|
||||
}
|
||||
//fmt.Fprintf(w, `(("%s") =`, c.ti.PrimaryCol)
|
||||
c.w.WriteString(`((`)
|
||||
colWithTable(c.w, ti.Name, ti.PrimaryCol)
|
||||
//c.w.WriteString(ti.PrimaryCol)
|
||||
c.w.WriteString(`) =`)
|
||||
case qcode.OpTsQuery:
|
||||
if len(ti.TSVCol) == 0 {
|
||||
return fmt.Errorf("no tsv column defined for %s", ti.Name)
|
||||
}
|
||||
//fmt.Fprintf(w, `(("%s") @@ to_tsquery('%s'))`, c.ti.TSVCol, val.Val)
|
||||
c.w.WriteString(`(("`)
|
||||
c.w.WriteString(ti.TSVCol)
|
||||
c.w.WriteString(`") @@ to_tsquery('`)
|
||||
c.w.WriteString(val.Val)
|
||||
c.w.WriteString(`'))`)
|
||||
valExists = false
|
||||
|
||||
default:
|
||||
return fmt.Errorf("[Where] unexpected op code %d", val.Op)
|
||||
}
|
||||
|
||||
if valExists {
|
||||
if val.Type == qcode.ValList {
|
||||
c.renderList(val)
|
||||
} else {
|
||||
c.renderVal(val, c.vars)
|
||||
}
|
||||
c.w.WriteString(`)`)
|
||||
}
|
||||
|
||||
qcode.FreeExp(val)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("12: unexpected value %v (%t)", intf, intf)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *compilerContext) renderOrderBy(sel *qcode.Select, ti *DBTableInfo) error {
	c.w.WriteString(` ORDER BY `)
	for i := range sel.OrderBy {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		ob := sel.OrderBy[i]

		switch ob.Order {
		case qcode.OrderAsc:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC`)
		case qcode.OrderDesc:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" DESC`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC`)
		case qcode.OrderAscNullsFirst:
			//fmt.Fprintf(w, `"%s_%d.ob.%s" ASC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC NULLS FIRST`)
		case qcode.OrderDescNullsFirst:
			//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS FIRST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC NULLS FIRST`)
		case qcode.OrderAscNullsLast:
			//fmt.Fprintf(w, `"%s_%d.ob.%s ASC NULLS LAST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` ASC NULLS LAST`)
		case qcode.OrderDescNullsLast:
			//fmt.Fprintf(w, `%s_%d.ob.%s DESC NULLS LAST`, sel.Table, sel.ID, ob.Col)
			tableIDColSuffix(c.w, ti.Name, sel.ID, ob.Col, "_ob")
			c.w.WriteString(` DESC NULLS LAST`)
		default:
			return fmt.Errorf("13: unexpected value %v", ob.Order)
		}
	}
	return nil
}

func (c *compilerContext) renderDistinctOn(sel *qcode.Select, ti *DBTableInfo) {
	io.WriteString(c.w, `DISTINCT ON (`)
	for i := range sel.DistinctOn {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		//fmt.Fprintf(w, `"%s_%d.ob.%s"`, c.sel.Table, c.sel.ID, c.sel.DistinctOn[i])
		tableIDColSuffix(c.w, ti.Name, sel.ID, sel.DistinctOn[i], "_ob")
	}
	c.w.WriteString(`) `)
}

func (c *compilerContext) renderList(ex *qcode.Exp) {
	io.WriteString(c.w, ` (`)
	for i := range ex.ListVal {
		if i != 0 {
			c.w.WriteString(`, `)
		}
		switch ex.ListType {
		case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
			c.w.WriteString(ex.ListVal[i])
		case qcode.ValStr:
			c.w.WriteString(`'`)
			c.w.WriteString(ex.ListVal[i])
			c.w.WriteString(`'`)
		}
	}
	c.w.WriteString(`)`)
}

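// renderVal (below) inlines values directly into the SQL text. Known
// variables are substituted from the vars map; unknown ones are emitted as
// `{{name}}` placeholders, which (judging by the expected strings in
// psql/select_test.go later in this diff) are left for a later
// substitution pass.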
func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string) {
	io.WriteString(c.w, ` `)

	switch ex.Type {
	case qcode.ValBool, qcode.ValInt, qcode.ValFloat:
		if len(ex.Val) != 0 {
			c.w.WriteString(ex.Val)
		} else {
			c.w.WriteString(`''`)
		}

	case qcode.ValStr:
		c.w.WriteString(`'`)
		c.w.WriteString(ex.Val)
		c.w.WriteString(`'`)

	case qcode.ValVar:
		if val, ok := vars[ex.Val]; ok {
			c.w.WriteString(val)
		} else {
			//fmt.Fprintf(w, `'{{%s}}'`, ex.Val)
			c.w.WriteString(`{{`)
			c.w.WriteString(ex.Val)
			c.w.WriteString(`}}`)
		}
	}
	//c.w.WriteString(`)`)
}

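// funcPrefixLen maps a recognized aggregate-function prefix on a column name
// (e.g. "max_price") to the prefix length so the column part can be sliced
// off. Longer prefixes that share a stem must be tested before shorter ones
// ("stddev_pop_" before "stddev_"), otherwise the shorter prefix matches
// first and the name is mis-split.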
func funcPrefixLen(fn string) int {
	switch {
	case strings.HasPrefix(fn, "avg_"):
		return 4
	case strings.HasPrefix(fn, "count_"):
		return 6
	case strings.HasPrefix(fn, "max_"):
		return 4
	case strings.HasPrefix(fn, "min_"):
		return 4
	case strings.HasPrefix(fn, "sum_"):
		return 4
	case strings.HasPrefix(fn, "stddev_pop_"):
		return 11
	case strings.HasPrefix(fn, "stddev_samp_"):
		return 12
	case strings.HasPrefix(fn, "stddev_"):
		return 7
	case strings.HasPrefix(fn, "variance_"):
		return 9
	case strings.HasPrefix(fn, "var_pop_"):
		return 8
	case strings.HasPrefix(fn, "var_samp_"):
		return 9
	}
	return 0
}

func hasBit(n uint32, pos uint32) bool {
	val := n & (1 << pos)
	return (val > 0)
}

func alias(w *bytes.Buffer, alias string) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func aliasWithID(w *bytes.Buffer, alias string, id int32) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"`)
}

func aliasWithIDSuffix(w *bytes.Buffer, alias string, id int32, suffix string) {
	w.WriteString(` AS "`)
	w.WriteString(alias)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(suffix)
	w.WriteString(`"`)
}

func colWithAlias(w *bytes.Buffer, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func tableWithAlias(w *bytes.Buffer, table, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func colWithTable(w *bytes.Buffer, table, col string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`"`)
}

func colWithTableID(w *bytes.Buffer, table string, id int32, col string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`"`)
}

func colWithTableIDAlias(w *bytes.Buffer, table string, id int32, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func colWithTableIDSuffixAlias(w *bytes.Buffer, table string, id int32,
	suffix, col, alias string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(suffix)
	w.WriteString(`"."`)
	w.WriteString(col)
	w.WriteString(`" AS "`)
	w.WriteString(alias)
	w.WriteString(`"`)
}

func tableIDColSuffix(w *bytes.Buffer, table string, id int32, col, suffix string) {
	w.WriteString(`"`)
	w.WriteString(table)
	w.WriteString(`_`)
	int2string(w, id)
	w.WriteString(`_`)
	w.WriteString(col)
	w.WriteString(suffix)
	w.WriteString(`"`)
}

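// int2string writes the base-10 digits of val without going through
// strconv.Itoa, avoiding a string allocation per call. The digits are
// accumulated in reverse and then written back out; the digit count is
// tracked explicitly so values with trailing zeros (e.g. 100) round-trip
// correctly through the reversal.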
const charset = "0123456789"

func int2string(w *bytes.Buffer, val int32) {
	if val < 10 {
		w.WriteByte(charset[val])
		return
	}

	temp := int32(0)
	digits := 0
	val2 := val
	for val2 > 0 {
		temp = (temp * 10) + (val2 % 10)
		val2 /= 10
		digits++
	}

	val3 := temp
	for i := 0; i < digits; i++ {
		w.WriteByte(charset[val3%10])
		val3 /= 10
	}
}

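// relID folds a (child, parent) table-name pair into a single uint64 key by
// hashing both names; the digest is reset afterwards so one hasher can be
// reused across calls.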
func relID(h *xxhash.Digest, child, parent string) uint64 {
	h.WriteString(child)
	h.WriteString(parent)
	v := h.Sum64()
	h.Reset()
	return v
}
551
psql/select_test.go
Normal file
@ -0,0 +1,551 @@
package psql

import (
	"bytes"
	"log"
	"os"
	"testing"

	"github.com/dosco/super-graph/qcode"
)

const (
	errNotExpected = "Generated SQL did not match what was expected"
)

var (
	qcompile *qcode.Compiler
	pcompile *Compiler
)

func TestMain(m *testing.M) {
	var err error

	qcompile, err = qcode.NewCompiler(qcode.Config{
		DefaultFilter: []string{
			`{ user_id: { _eq: $user_id } }`,
		},
		FilterMap: map[string][]string{
			"users": []string{
				"{ id: { eq: $user_id } }",
			},
			"products": []string{
				"{ price: { gt: 0 } }",
				"{ price: { lt: 8 } }",
			},
			"customers": []string{},
			"mes": []string{
				"{ id: { eq: $user_id } }",
			},
		},
		Blocklist: []string{
			"secret",
			"password",
			"token",
		},
	})

	if err != nil {
		log.Fatal(err)
	}

	tables := []*DBTable{
		&DBTable{Name: "customers", Type: "table"},
		&DBTable{Name: "users", Type: "table"},
		&DBTable{Name: "products", Type: "table"},
		&DBTable{Name: "purchases", Type: "table"},
	}

	columns := [][]*DBColumn{
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 9, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 10, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "full_name", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "phone", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "avatar", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "email", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "encrypted_password", Type: "character varying", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "reset_password_token", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "reset_password_sent_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 9, Name: "remember_created_at", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 10, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 11, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "name", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 3, Name: "description", Type: "text", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 4, Name: "price", Type: "numeric(7,2)", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "user_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "users", FKeyColID: []int16{1}},
			&DBColumn{ID: 6, Name: "created_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "updated_at", Type: "timestamp without time zone", NotNull: true, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 8, Name: "tsv", Type: "tsvector", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
		[]*DBColumn{
			&DBColumn{ID: 1, Name: "id", Type: "bigint", NotNull: true, PrimaryKey: true, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 2, Name: "customer_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "customers", FKeyColID: []int16{1}},
			&DBColumn{ID: 3, Name: "product_id", Type: "bigint", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "products", FKeyColID: []int16{1}},
			&DBColumn{ID: 4, Name: "sale_type", Type: "character varying", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 5, Name: "quantity", Type: "integer", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 6, Name: "due_date", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)},
			&DBColumn{ID: 7, Name: "returned", Type: "timestamp without time zone", NotNull: false, PrimaryKey: false, UniqueKey: false, FKeyTable: "", FKeyColID: []int16(nil)}},
	}

	schema := &DBSchema{
		t:  make(map[string]*DBTableInfo),
		rm: make(map[string]map[string]*DBRel),
		al: make(map[string]struct{}),
	}

	aliases := map[string][]string{
		"users": []string{"mes"},
	}

	for i, t := range tables {
		schema.updateSchema(t, columns[i], aliases)
	}

	vars := NewVariables(map[string]string{
		"account_id": "select account_id from users where id = $user_id",
	})

	pcompile = NewCompiler(Config{
		Schema: schema,
		Vars:   vars,
	})

	os.Exit(m.Run())
}

func compileGQLToPSQL(gql string, vars Variables) ([]byte, error) {
	qc, err := qcompile.Compile([]byte(gql))
	if err != nil {
		return nil, err
	}

	_, sqlStmt, err := pcompile.CompileEx(qc, vars)
	if err != nil {
		return nil, err
	}

	return sqlStmt, nil
}

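// Each test case below compiles a GraphQL query through qcompile and pcompile
// and compares the generated SQL byte-for-byte against a golden string.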
func withComplexArgs(t *testing.T) {
	gql := `query {
		proDUcts(
			# returns only 30 items
			limit: 30,

			# starts from item 10, commented out for now
			# offset: 10,

			# orders the response items by highest price
			order_by: { price: desc },

			# no duplicate prices returned
			distinct: [ price ]

			# only items with an id >= 20 and < 28 are returned
			where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
			id
			NAME
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0" ORDER BY "products_0_price_ob" DESC), '[]') AS "products" FROM (SELECT DISTINCT ON ("products_0_price_ob") row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0", "products_0"."price" AS "products_0_price_ob" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") < 28) AND (("products"."id") >= 20)) LIMIT ('30') :: integer) AS "products_0" ORDER BY "products_0_price_ob" DESC LIMIT ('30') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereMultiOr(t *testing.T) {
	gql := `query {
		products(
			where: {
				or: {
					not: { id: { is_null: true } },
					price: { gt: 10 },
					price: { lt: 20 }
				} }
			) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") < 20) OR (("products"."price") > 10) OR NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereIsNull(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: {
					not: { id: { is_null: true } },
					price: { gt: 10 }
				}}) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func withWhereAndList(t *testing.T) {
	gql := `query {
		products(
			where: {
				and: [
					{ not: { id: { is_null: true } } },
					{ price: { gt: 10 } },
				] } ) {
			id
			name
			price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name", "products_0"."price" AS "price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name", "products"."price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") > 10) AND NOT (("products"."id") IS NULL)) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func fetchByID(t *testing.T) {
	gql := `query {
		product(id: 15) {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") = 15)) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func searchQuery(t *testing.T) {
	gql := `query {
		products(search: "Imperial") {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("tsv") @@ to_tsquery('Imperial'))) LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func oneToMany(t *testing.T) {
	gql := `query {
		users {
			email
			products {
				name
				price
			}
		}
	}`

	sql := `SELECT json_object_agg('users', users) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email", "users"."id" FROM "users" WHERE ((("users"."id") = {{user_id}})) LIMIT ('20') :: integer) AS "users_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name", "products_1"."price" AS "price") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name", "products"."price" FROM "products" WHERE ((("products"."user_id") = ("users_0"."id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func belongsTo(t *testing.T) {
	gql := `query {
		products {
			name
			price
			users {
				email
			}
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."price" AS "price", "users_1_join"."users" AS "users") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."price", "products"."user_id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "users" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "users_1"."email" AS "email") AS "sel_1")) AS "sel_json_1" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = ("products_0"."user_id"))) LIMIT ('20') :: integer) AS "users_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "users_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func manyToMany(t *testing.T) {
	gql := `query {
		products {
			name
			customers {
				email
				full_name
			}
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "customers_1_join"."customers" AS "customers") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", "products"."id" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) LIMIT ('20') :: integer) AS "products_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "customers_1"."email" AS "email", "customers_1"."full_name" AS "full_name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "customers"."email", "customers"."full_name" FROM "customers" LEFT OUTER JOIN "purchases" ON (("purchases"."product_id") = ("products_0"."id")) WHERE ((("customers"."id") = ("purchases"."customer_id"))) LIMIT ('20') :: integer) AS "customers_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "customers_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func manyToManyReverse(t *testing.T) {
	gql := `query {
		customers {
			email
			full_name
			products {
				name
			}
		}
	}`

	sql := `SELECT json_object_agg('customers', customers) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "customers" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "customers_0"."email" AS "email", "customers_0"."full_name" AS "full_name", "products_1_join"."products" AS "products") AS "sel_0")) AS "sel_json_0" FROM (SELECT "customers"."email", "customers"."full_name", "customers"."id" FROM "customers" LIMIT ('20') :: integer) AS "customers_0" LEFT OUTER JOIN LATERAL (SELECT coalesce(json_agg("sel_json_1"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_1" FROM (SELECT "products_1"."name" AS "name") AS "sel_1")) AS "sel_json_1" FROM (SELECT "products"."name" FROM "products" LEFT OUTER JOIN "purchases" ON (("purchases"."customer_id") = ("customers_0"."id")) WHERE ((("products"."id") = ("purchases"."product_id"))) LIMIT ('20') :: integer) AS "products_1" LIMIT ('20') :: integer) AS "sel_json_agg_1") AS "products_1_join" ON ('true') LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func aggFunction(t *testing.T) {
	gql := `query {
		products {
			name
			count_price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."name" AS "name", "products_0"."count_price" AS "count_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."name", count("products"."price") AS "count_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8)) GROUP BY "products"."name" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func aggFunctionWithFilter(t *testing.T) {
	gql := `query {
		products(where: { id: { gt: 10 } }) {
			id
			max_price
		}
	}`

	sql := `SELECT json_object_agg('products', products) FROM (SELECT coalesce(json_agg("sel_json_0"), '[]') AS "products" FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."max_price" AS "max_price") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", max("products"."price") AS "max_price" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."id") > 10)) GROUP BY "products"."id" LIMIT ('20') :: integer) AS "products_0" LIMIT ('20') :: integer) AS "sel_json_agg_0") AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func queryWithVariables(t *testing.T) {
	gql := `query {
		product(id: $PRODUCT_ID, where: { price: { eq: $PRODUCT_PRICE } }) {
			id
			name
		}
	}`

	sql := `SELECT json_object_agg('product', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "products_0"."id" AS "id", "products_0"."name" AS "name") AS "sel_0")) AS "sel_json_0" FROM (SELECT "products"."id", "products"."name" FROM "products" WHERE ((("products"."price") > 0) AND (("products"."price") < 8) AND (("products"."price") = {{product_price}}) AND (("products"."id") = {{product_id}})) LIMIT ('1') :: integer) AS "products_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func syntheticTables(t *testing.T) {
	gql := `query {
		me {
			email
		}
	}`

	sql := `SELECT json_object_agg('me', sel_json_0) FROM (SELECT row_to_json((SELECT "sel_0" FROM (SELECT "users_0"."email" AS "email") AS "sel_0")) AS "sel_json_0" FROM (SELECT "users"."email" FROM "users" WHERE ((("users"."id") = {{user_id}})) LIMIT ('1') :: integer) AS "users_0" LIMIT ('1') :: integer) AS "done_1337";`

	resSQL, err := compileGQLToPSQL(gql, nil)
	if err != nil {
		t.Fatal(err)
	}

	if string(resSQL) != sql {
		t.Fatal(errNotExpected)
	}
}

func TestCompileSelect(t *testing.T) {
	t.Run("withComplexArgs", withComplexArgs)
	t.Run("withWhereAndList", withWhereAndList)
	t.Run("withWhereIsNull", withWhereIsNull)
	t.Run("withWhereMultiOr", withWhereMultiOr)
	t.Run("fetchByID", fetchByID)
	t.Run("searchQuery", searchQuery)
	t.Run("belongsTo", belongsTo)
	t.Run("oneToMany", oneToMany)
	t.Run("manyToMany", manyToMany)
	t.Run("manyToManyReverse", manyToManyReverse)
	t.Run("aggFunction", aggFunction)
	t.Run("aggFunctionWithFilter", aggFunctionWithFilter)
	t.Run("syntheticTables", syntheticTables)
	t.Run("queryWithVariables", queryWithVariables)
}

var benchGQL = []byte(`query {
	proDUcts(
		# returns only 30 items
		limit: 30,

		# starts from item 10, commented out for now
		# offset: 10,

		# orders the response items by highest price
		order_by: { price: desc },

		# only items with an id >= 20 and < 28 are returned
		where: { id: { and: { greater_or_equals: 20, lt: 28 } } }) {
		id
		NAME
		price
		user {
			full_name
			picture : avatar
		}
	}
}`)

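// The benchmarks below reuse a single bytes.Buffer across iterations so that
// only compiler allocations show up in the numbers. Run them with the
// standard `go test` benchmark flags, e.g.:
//
//	go test -bench=Compile -benchmem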
func BenchmarkCompile(b *testing.B) {
	w := &bytes.Buffer{}

	b.ResetTimer()
	b.ReportAllocs()

	for n := 0; n < b.N; n++ {
		w.Reset()

		qc, err := qcompile.Compile(benchGQL)
		if err != nil {
			b.Fatal(err)
		}

		_, err = pcompile.Compile(qc, w, nil)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkCompileParallel(b *testing.B) {
	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		w := &bytes.Buffer{}

		for pb.Next() {
			w.Reset()

			qc, err := qcompile.Compile(benchGQL)
			if err != nil {
				b.Fatal(err)
			}

			_, err = pcompile.Compile(qc, w, nil)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
47
psql/stack.go
Normal file
@ -0,0 +1,47 @@
package psql

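// Stack is a LIFO of int32 values backed by a fixed 20-element array: the
// slice initially aliases the array, so stacks that never grow past 20 items
// do all their work without a heap allocation, and deeper stacks spill into
// an appended slice transparently. Intended use:
//
//	s := NewStack()
//	s.Push(7)   // stored in the inline array
//	_ = s.Pop() // 7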
type Stack struct {
	stA [20]int32
	st  []int32
	top int
}

// Create a new Stack
func NewStack() *Stack {
	s := &Stack{top: -1}
	s.st = s.stA[:0]
	return s
}

// Return the number of items in the Stack
func (s *Stack) Len() int {
	return (s.top + 1)
}

// View the top item on the Stack
func (s *Stack) Peek() int32 {
	if s.top == -1 {
		return -1
	}
	return s.st[s.top]
}

// Pop the top item of the Stack and return it
func (s *Stack) Pop() int32 {
	if s.top == -1 {
		return -1
	}

	s.top--
	return s.st[(s.top + 1)]
}

// Push a value onto the top of the Stack
func (s *Stack) Push(value int32) {
	s.top++
	if len(s.st) <= s.top {
		s.st = append(s.st, value)
	} else {
		s.st[s.top] = value
	}
}
366
psql/tables.go
@ -1,30 +1,161 @@
package psql

import (
	"context"
	"fmt"
	"strings"

	"github.com/go-pg/pg"
	"github.com/gobuffalo/flect"
	"github.com/jackc/pgtype"
	"github.com/jackc/pgx/v4/pgxpool"
)

type TCKey struct {
	Table, Column string
type DBTable struct {
	Name string
	Type string
}

type TTKey struct {
	Table1, Table2 string
func GetTables(dbc *pgxpool.Conn) ([]*DBTable, error) {
	sqlStmt := `
SELECT
	c.relname as "name",
	CASE c.relkind WHEN 'r' THEN 'table'
		WHEN 'v' THEN 'view'
		WHEN 'm' THEN 'materialized view'
		WHEN 'f' THEN 'foreign table'
	END as "type"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','m','f','')
	AND n.nspname <> ('pg_catalog')
	AND n.nspname <> ('information_schema')
	AND n.nspname !~ ('^pg_toast')
	AND pg_catalog.pg_table_is_visible(c.oid);`

	var tables []*DBTable

	rows, err := dbc.Query(context.Background(), sqlStmt)
	if err != nil {
		return nil, fmt.Errorf("Error fetching tables: %s", err)
	}
	defer rows.Close()

	for rows.Next() {
		t := DBTable{}
		err = rows.Scan(&t.Name, &t.Type)
		if err != nil {
			return nil, err
		}
		tables = append(tables, &t)
	}

	return tables, nil
}

type DBColumn struct {
	ID         int16
	Name       string
	Type       string
	NotNull    bool
	PrimaryKey bool
	UniqueKey  bool
	FKeyTable  string
	FKeyColID  []int16
	fKeyColID  pgtype.Int2Array
}

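// GetColumns left-joins pg_constraint, so a column that participates in
// several constraints comes back as several rows; the cmap below merges them
// by attnum. Note that iterating the map to build the result leaves the
// returned column order unspecified despite the ORDER BY in the query.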
func GetColumns(dbc *pgxpool.Conn, schema, table string) ([]*DBColumn, error) {
	sqlStmt := `
SELECT
	f.attnum AS id,
	f.attname AS name,
	f.attnotnull AS notnull,
	pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
	CASE
		WHEN p.contype = ('p'::char) THEN true
		ELSE false
	END AS primarykey,
	CASE
		WHEN p.contype = ('u'::char) THEN true
		ELSE false
	END AS uniquekey,
	CASE
		WHEN p.contype = ('f'::char) THEN g.relname
		ELSE ''::text
	END AS foreignkey,
	CASE
		WHEN p.contype = ('f'::char) THEN p.confkey
		ELSE ARRAY[]::int2[]
	END AS foreignkey_fieldnum
FROM pg_attribute f
	JOIN pg_class c ON c.oid = f.attrelid
	LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
	LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
	LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind = ('r'::char)
	AND n.nspname = $1 -- Replace with Schema name
	AND c.relname = $2 -- Replace with table name
	AND f.attnum > 0
	AND f.attisdropped = false
ORDER BY id;`

	rows, err := dbc.Query(context.Background(), sqlStmt, schema, table)
	if err != nil {
		return nil, fmt.Errorf("error fetching columns: %s", err)
	}
	defer rows.Close()

	cmap := make(map[int16]*DBColumn)

	for rows.Next() {
		c := DBColumn{}
		err = rows.Scan(&c.ID, &c.Name, &c.NotNull, &c.Type, &c.PrimaryKey, &c.UniqueKey,
			&c.FKeyTable, &c.fKeyColID)
		if err != nil {
			return nil, err
		}

		if v, ok := cmap[c.ID]; ok {
			if c.PrimaryKey {
				v.PrimaryKey = true
			}
			if c.NotNull {
				v.NotNull = true
			}
			if c.UniqueKey {
				v.UniqueKey = true
			}
		} else {
			err := c.fKeyColID.AssignTo(&c.FKeyColID)
			if err != nil {
				return nil, err
			}
			cmap[c.ID] = &c
		}
	}

	cols := make([]*DBColumn, 0, len(cmap))
	for _, v := range cmap {
		cols = append(cols, v)
	}

	return cols, nil
}

type DBSchema struct {
	Tables map[string]*DBTableInfo
	RelMap map[TTKey]*DBRel
	t      map[string]*DBTableInfo
	rm     map[string]map[string]*DBRel
	al     map[string]struct{}
}

type DBTableInfo struct {
	Name        string
	Singular    bool
	PrimaryCol  string
	TSVCol      string
	Columns     map[string]*DBColumn
	ColumnNames []string
}

type RelType int

@ -33,6 +164,7 @@ const (
	RelBelongTo RelType = iota + 1
	RelOneToMany
	RelOneToManyThrough
	RelRemote
)

type DBRel struct {
@ -43,56 +175,96 @@ type DBRel struct {
	Col2 string
}

func NewDBSchema(db *pg.DB) (*DBSchema, error) {
func NewDBSchema(db *pgxpool.Pool, aliases map[string][]string) (*DBSchema, error) {
	schema := &DBSchema{
		Tables: make(map[string]*DBTableInfo),
		RelMap: make(map[TTKey]*DBRel),
		t:  make(map[string]*DBTableInfo),
		rm: make(map[string]map[string]*DBRel),
		al: make(map[string]struct{}),
	}

	tables, err := GetTables(db)
	dbc, err := db.Acquire(context.Background())
	if err != nil {
		return nil, fmt.Errorf("error acquiring connection from pool")
	}
	defer dbc.Release()

	tables, err := GetTables(dbc)
	if err != nil {
		return nil, err
	}

	for _, t := range tables {
		cols, err := GetColumns(db, "public", t.Name)
		cols, err := GetColumns(dbc, "public", t.Name)
		if err != nil {
			return nil, err
		}

		schema.updateSchema(t, cols)
		schema.updateSchema(t, cols, aliases)
	}

	return schema, nil
}

func (s *DBSchema) updateSchema(t *DBTable, cols []*DBColumn) {
	// Current table
	ti := &DBTableInfo{
		Name:    t.Name,
		Columns: make(map[string]*DBColumn, len(cols)),
	}
func (s *DBSchema) updateSchema(
	t *DBTable,
	cols []*DBColumn,
	aliases map[string][]string) {

	// Foreign key columns in current table
	var jcols []*DBColumn
	colByID := make(map[int]*DBColumn)
	colByID := make(map[int16]*DBColumn)
	columns := make(map[string]*DBColumn, len(cols))
	colNames := make([]string, 0, len(cols))

	for i := range cols {
		c := cols[i]
		ti.Columns[strings.ToLower(c.Name)] = cols[i]
		colByID[c.ID] = cols[i]
		name := strings.ToLower(c.Name)
		columns[name] = c
		colNames = append(colNames, name)
		colByID[c.ID] = c
	}

	singular := strings.ToLower(flect.Singularize(t.Name))
	s.t[singular] = &DBTableInfo{
		Name:        t.Name,
		Singular:    true,
		Columns:     columns,
		ColumnNames: colNames,
	}

	plural := strings.ToLower(flect.Pluralize(t.Name))
	s.t[plural] = &DBTableInfo{
		Name:        t.Name,
		Singular:    false,
		Columns:     columns,
		ColumnNames: colNames,
	}

	ct := strings.ToLower(t.Name)
	s.Tables[ct] = ti

	if al, ok := aliases[ct]; ok {
		for i := range al {
			k1 := flect.Singularize(al[i])
			s.t[k1] = s.t[singular]

			k2 := flect.Pluralize(al[i])
			s.t[k2] = s.t[plural]

			s.al[k1] = struct{}{}
			s.al[k2] = struct{}{}
		}
	}

	jcols := make([]*DBColumn, 0, len(cols))

	for _, c := range cols {
		switch {
		case c.Type == "tsvector":
			s.Tables[ct].TSVCol = c.Name
			s.t[singular].TSVCol = c.Name
			s.t[plural].TSVCol = c.Name

		case c.PrimaryKey:
			s.Tables[ct].PrimaryCol = c.Name
			s.t[singular].PrimaryCol = c.Name
			s.t[plural].PrimaryCol = c.Name

		case len(c.FKeyTable) != 0:
			if len(c.FKeyColID) == 0 {
@ -109,12 +281,12 @@ func (s *DBSchema) updateSchema(t *DBTable, cols []*DBColumn) {
			// Belongs-to relation between current table and the
			// table in the foreign key
			rel1 := &DBRel{RelBelongTo, "", "", c.Name, fc.Name}
			s.RelMap[TTKey{ct, ft}] = rel1
			s.SetRel(ct, ft, rel1)

			// One-to-many relation between the foreign key table and
			// the current table
			rel2 := &DBRel{RelOneToMany, "", "", fc.Name, c.Name}
			s.RelMap[TTKey{ft, ct}] = rel2
			s.SetRel(ft, ct, rel2)

			jcols = append(jcols, c)
		}
@ -138,7 +310,9 @@ func (s *DBSchema) updateSchema(t *DBTable, cols []*DBColumn) {
}

func (s *DBSchema) updateSchemaOTMT(
	ct string, col1, col2 *DBColumn, colByID map[int]*DBColumn) {
	ct string,
	col1, col2 *DBColumn,
	colByID map[int16]*DBColumn) {

	t1 := strings.ToLower(col1.FKeyTable)
	t2 := strings.ToLower(col2.FKeyTable)
@ -156,110 +330,56 @@ func (s *DBSchema) updateSchemaOTMT(
	// 2nd foreign key table
	//rel1 := &DBRel{RelOneToManyThrough, ct, fc1.Name, col1.Name}
	rel1 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name, col1.Name}
	s.RelMap[TTKey{t1, t2}] = rel1
	s.SetRel(t1, t2, rel1)

	// One-to-many-through relation between the 2nd foreign key table and the
	// 1st foreign key table
	//rel2 := &DBRel{RelOneToManyThrough, ct, col2.Name, fc2.Name}
	rel2 := &DBRel{RelOneToManyThrough, ct, col1.Name, fc1.Name, col2.Name}
	s.RelMap[TTKey{t2, t1}] = rel2
}

type DBTable struct {
	Name string `sql:"name"`
	Type string `sql:"type"`
}

func GetTables(db *pg.DB) ([]*DBTable, error) {
	sqlStmt := `
SELECT
	c.relname as "name",
	CASE c.relkind WHEN 'r' THEN 'table'
		WHEN 'v' THEN 'view'
		WHEN 'm' THEN 'materialized view'
		WHEN 'f' THEN 'foreign table' END as "type"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','m','f','')
	AND n.nspname <> 'pg_catalog'
	AND n.nspname <> 'information_schema'
	AND n.nspname !~ '^pg_toast'
	AND pg_catalog.pg_table_is_visible(c.oid);
`

	var t []*DBTable
	_, err := db.Query(&t, sqlStmt)

	if err != nil {
		return nil, fmt.Errorf("Error fetching tables: %s", err)
	}

	return t, nil
}

type DBColumn struct {
	ID         int    `sql:"id"`
	Name       string `sql:"name"`
	Type       string `sql:"type"`
	NotNull    bool   `sql:"notnull"`
	PrimaryKey bool   `sql:"primarykey"`
	Uniquekey  bool   `sql:"uniquekey"`
	FKeyTable  string `sql:"foreignkey"`
	FKeyColID  []int  `sql:"foreignkey_fieldnum,array"`
}

func GetColumns(db *pg.DB, schema, table string) ([]*DBColumn, error) {
	sqlStmt := `
SELECT
	f.attnum AS id,
	f.attname AS name,
	f.attnotnull AS notnull,
	pg_catalog.format_type(f.atttypid,f.atttypmod) AS type,
	CASE
		WHEN p.contype = 'p' THEN 't'
		ELSE 'f'
	END AS primarykey,
	CASE
		WHEN p.contype = 'u' THEN 't'
		ELSE 'f'
	END AS uniquekey,
	CASE
		WHEN p.contype = 'f' THEN g.relname
	END AS foreignkey,
	CASE
		WHEN p.contype = 'f' THEN p.confkey
	END AS foreignkey_fieldnum
FROM pg_attribute f
	JOIN pg_class c ON c.oid = f.attrelid
	JOIN pg_type t ON t.oid = f.atttypid
	LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum
	LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
	LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
	LEFT JOIN pg_class AS g ON p.confrelid = g.oid
WHERE c.relkind = 'r'::char
	AND n.nspname = $1 -- Replace with Schema name
	AND c.relname = $2 -- Replace with table name
	AND f.attnum > 0 ORDER BY id;
`

	stmt, err := db.Prepare(sqlStmt)
	if err != nil {
		return nil, fmt.Errorf("error fetching columns: %s", err)
	}

	var t []*DBColumn
	_, err = stmt.Query(&t, schema, table)
	if err != nil {
		return nil, fmt.Errorf("error fetching columns: %s", err)
	}

	return t, nil
	s.SetRel(t2, t1, rel2)
}

func (s *DBSchema) GetTable(table string) (*DBTableInfo, error) {
	t, ok := s.Tables[table]
	t, ok := s.t[table]
	if !ok {
		return nil, fmt.Errorf("unknown table '%s'", table)
	}
	return t, nil
}

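// SetRel registers the relationship under every singular/plural combination
// of the child and parent names, so GetRel lookups succeed regardless of the
// inflection used in the incoming query.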
func (s *DBSchema) SetRel(child, parent string, rel *DBRel) error {
	sc := strings.ToLower(flect.Singularize(child))
	pc := strings.ToLower(flect.Pluralize(child))

	if _, ok := s.rm[sc]; !ok {
		s.rm[sc] = make(map[string]*DBRel)
	}

	if _, ok := s.rm[pc]; !ok {
		s.rm[pc] = make(map[string]*DBRel)
	}

	sp := strings.ToLower(flect.Singularize(parent))
	pp := strings.ToLower(flect.Pluralize(parent))

	s.rm[sc][sp] = rel
	s.rm[sc][pp] = rel
	s.rm[pc][sp] = rel
	s.rm[pc][pp] = rel

	return nil
}

func (s *DBSchema) GetRel(child, parent string) (*DBRel, error) {
	rel, ok := s.rm[child][parent]
	if !ok {
		return nil, fmt.Errorf("unknown relationship '%s' -> '%s'",
			child, parent)
	}
	return rel, nil
}

func (s *DBSchema) IsAlias(name string) bool {
	_, ok := s.al[name]
	return ok
}
7
qcode/bench.0
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkQCompile-8     100000    18528 ns/op    9208 B/op    107 allocs/op
BenchmarkQCompileP-8    300000     5952 ns/op    9208 B/op    107 allocs/op
PASS
ok  	github.com/dosco/super-graph/qcode	3.893s
7
qcode/bench.1
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkQCompile-8     100000    18240 ns/op    7454 B/op    88 allocs/op
BenchmarkQCompileP-8    300000     5788 ns/op    7494 B/op    88 allocs/op
PASS
ok  	github.com/dosco/super-graph/qcode	3.813s
7
qcode/bench.2
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkQCompile-8     100000    17231 ns/op    3352 B/op    87 allocs/op
BenchmarkQCompileP-8    300000     5023 ns/op    3387 B/op    87 allocs/op
PASS
ok  	github.com/dosco/super-graph/qcode	3.462s
7
qcode/bench.3
Normal file
@ -0,0 +1,7 @@
goos: darwin
goarch: amd64
pkg: github.com/dosco/super-graph/qcode
BenchmarkQCompile-8     200000    10029 ns/op    2291 B/op    38 allocs/op
BenchmarkQCompileP-8    500000     2925 ns/op    2298 B/op    38 allocs/op
PASS
ok  	github.com/dosco/super-graph/qcode	3.616s
14
qcode/fuzz.go
Normal file
@ -0,0 +1,14 @@
package qcode

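// FuzzerEntrypoint appears to follow the go-fuzz harness convention, which
// Fuzzbuzz is assumed here to share: return 0 when the input was handled and
// a negative value when the input should not be added to the corpus.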
// FuzzerEntrypoint for Fuzzbuzz
func FuzzerEntrypoint(data []byte) int {
	//testData := string(data)

	qcompile, _ := NewCompiler(Config{})
	_, err := qcompile.Compile(data)
	if err != nil {
		return -1
	}

	return 0
}
238
qcode/lex.go
@ -1,58 +1,37 @@
package qcode

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

var (
	queryToken        = []byte("query")
	mutationToken     = []byte("mutation")
	subscriptionToken = []byte("subscription")
	trueToken         = []byte("true")
	falseToken        = []byte("false")
	quotesToken       = []byte(`'"`)
	signsToken        = []byte(`+-`)
	punctuatorToken   = []byte(`!():=[]{|}`)
	spreadToken       = []byte(`...`)
	digitToken        = []byte(`0123456789`)
	dotToken          = []byte(`.`)
)

// Pos represents a byte position in the original input text from which
// this template was parsed.
type Pos int

func (p Pos) Position() Pos {
	return p
}

// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	val  string   // The value of this item.
	line int      // The line number at the start of this item.
}

func (i *item) String() string {
	var v string

	switch i.typ {
	case itemEOF:
		v = "EOF"
	case itemError:
		v = "error"
	case itemName:
		v = "name"
	case itemQuery:
		v = "query"
	case itemMutation:
		v = "mutation"
	case itemSub:
		v = "subscription"
	case itemPunctuator:
		v = "punctuator"
	case itemDirective:
		v = "directive"
	case itemVariable:
		v = "variable"
	case itemIntVal:
		v = "int"
	case itemFloatVal:
		v = "float"
	case itemStringVal:
		v = "string"
	}
	return fmt.Sprintf("%s %q", v, i.val)
	end  Pos    // The ending position, in bytes, of this item in the input string.
	line uint16 // The line number at the start of this item.
}

// itemType identifies the type of lex items.
@ -102,22 +81,29 @@ type stateFn func(*lexer) stateFn

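// The rewrite below replaces string slicing with byte offsets: items record
// (start, end) positions into the shared input instead of copying substrings,
// and the fixed itemsA array backs the items slice, so a typical query lexes
// without heap allocations (consistent with the qcode/bench.* numbers added
// in this changeset).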
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
input []byte // the string being scanned
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
items []item // array of scanned items
|
||||
line int // 1+number of newlines seen
|
||||
itemsA [50]item
|
||||
line uint16 // 1+number of newlines seen
|
||||
err error
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
var zeroLex = lexer{}
|
||||
|
||||
func (l *lexer) Reset() {
|
||||
*l = zeroLex
|
||||
}
|
||||
|
||||
// next returns the next byte in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
r, w := utf8.DecodeRune(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
if r == '\n' {
|
||||
@ -142,30 +128,38 @@ func (l *lexer) backup() {
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lexer) current() string {
|
||||
return l.input[l.start:l.pos]
|
||||
func (l *lexer) current() (Pos, Pos) {
|
||||
return l.start, l.pos
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items = append(l.items, item{t, l.start, l.input[l.start:l.pos], l.line})
|
||||
l.items = append(l.items, item{t, l.start, l.pos, l.line})
|
||||
// Some items contain text internally. If so, count their newlines.
|
||||
switch t {
|
||||
case itemName:
|
||||
l.line += strings.Count(l.input[l.start:l.pos], "\n")
|
||||
for i := l.start; i < l.pos; i++ {
|
||||
if l.input[i] == '\n' {
|
||||
l.line++
|
||||
}
|
||||
}
|
||||
}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.line += strings.Count(l.input[l.start:l.pos], "\n")
|
||||
for i := l.start; i < l.pos; i++ {
|
||||
if l.input[i] == '\n' {
|
||||
l.line++
|
||||
}
|
||||
}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.ContainsRune(valid, l.next()) {
|
||||
func (l *lexer) accept(valid []byte) bool {
|
||||
if bytes.ContainsRune(valid, l.next()) {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
@ -191,8 +185,8 @@ func (l *lexer) acceptComment() {
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.ContainsRune(valid, l.next()) {
|
||||
func (l *lexer) acceptRun(valid []byte) {
|
||||
for bytes.ContainsRune(valid, l.next()) {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
@ -200,24 +194,27 @@ func (l *lexer) acceptRun(valid string) {
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items = append(l.items, item{itemError, l.start,
|
||||
fmt.Sprintf(format, args...), l.line})
|
||||
l.err = fmt.Errorf(format, args...)
|
||||
l.items = append(l.items, item{itemError, l.start, l.pos, l.line})
|
||||
return nil
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(input string) (*lexer, error) {
|
||||
l := &lexer{
|
||||
input: input,
|
||||
items: make([]item, 0, 100),
|
||||
line: 1,
|
||||
func lex(l *lexer, input []byte) error {
|
||||
if len(input) == 0 {
|
||||
return errors.New("empty query")
|
||||
}
|
||||
|
||||
l.input = input
|
||||
l.items = l.itemsA[:0]
|
||||
l.line = 1
|
||||
|
||||
l.run()
|
||||
|
||||
if last := l.items[len(l.items)-1]; last.typ == itemError {
|
||||
return nil, fmt.Errorf(last.val)
|
||||
return l.err
|
||||
}
|
||||
return l, nil
|
||||
return nil
|
||||
}
|
||||
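The rewritten lex() above no longer allocates a lexer or an items slice per call: the caller supplies a reusable lexer, and the token slice is re-pointed at the embedded itemsA [50]item array. A minimal stand-alone sketch of that pattern (simplified, hypothetical names, not the project's API):

package main

import "fmt"

type token struct{ start, end int }

type scanner struct {
	items  []token
	itemsA [50]token // fixed backing array embedded in the struct
}

func (s *scanner) scan(n int) {
	s.items = s.itemsA[:0] // len 0, cap 50: reuses the array, no allocation
	for i := 0; i < n; i++ {
		s.items = append(s.items, token{i, i + 1}) // only allocates past cap
	}
}

func main() {
	var s scanner
	s.scan(3)
	fmt.Println(len(s.items), cap(s.items)) // 3 50
}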
// run runs the state machine for the lexer.
@@ -251,9 +248,11 @@ func lexRoot(l *lexer) stateFn {
case r == '$':
l.ignore()
if l.acceptAlphaNum() {
s, e := l.current()
lowercase(l.input, s, e)
l.emit(itemVariable)
}
case strings.ContainsRune("!():=[]{|}", r):
case contains(l.input, l.start, l.pos, punctuatorToken):
if item, ok := punctuators[r]; ok {
l.emit(item)
} else {
@@ -264,7 +263,7 @@ func lexRoot(l *lexer) stateFn {
return lexString
case r == '.':
if len(l.input) >= 3 {
if strings.HasSuffix(l.input[:l.pos], "...") {
if equals(l.input, 0, 3, spreadToken) {
l.emit(itemSpread)
return lexRoot
}
@@ -286,32 +285,28 @@ func lexRoot(l *lexer) stateFn {
func lexName(l *lexer) stateFn {
for {
r := l.next()

if r == eof {
l.emit(itemEOF)
return nil
}

if !isAlphaNumeric(r) {
l.backup()
v := l.current()
s, e := l.current()

lowercase(l.input, s, e)

if len(v) == 0 {
switch {
case strings.EqualFold(v, "query"):
case equals(l.input, s, e, queryToken):
l.emit(itemQuery)
break
case strings.EqualFold(v, "mutation"):
case equals(l.input, s, e, mutationToken):
l.emit(itemMutation)
break
case strings.EqualFold(v, "subscription"):
case equals(l.input, s, e, subscriptionToken):
l.emit(itemSub)
break
}
}

switch {
case strings.EqualFold(v, "true"):
case equals(l.input, s, e, trueToken):
l.emit(itemBoolVal)
case strings.EqualFold(v, "false"):
case equals(l.input, s, e, falseToken):
l.emit(itemBoolVal)
default:
l.emit(itemName)
@@ -324,7 +319,7 @@ func lexName(l *lexer) stateFn {

// lexString scans a string.
func lexString(l *lexer) stateFn {
if l.accept("\"'") {
if l.accept([]byte(quotesToken)) {
l.ignore()

for {
@@ -336,7 +331,7 @@ func lexString(l *lexer) stateFn {
if r == '\'' || r == '"' {
l.backup()
l.emit(itemStringVal)
if l.accept("\"'") {
if l.accept(quotesToken) {
l.ignore()
}
break
@@ -353,20 +348,19 @@ func lexString(l *lexer) stateFn {
func lexNumber(l *lexer) stateFn {
var it itemType
// Optional leading sign.
l.accept("+-")
l.accept(signsToken)

// Is it integer
digits := "0123456789"
if l.accept(digits) {
l.acceptRun(digits)
if l.accept(digitToken) {
l.acceptRun(digitToken)
it = itemIntVal
}

// Is it float
if l.peek() == '.' {
if l.accept(".") {
if l.accept(digits) {
l.acceptRun(digits)
if l.accept(dotToken) {
if l.accept(digitToken) {
l.acceptRun(digitToken)
it = itemFloatVal
}
} else {
@@ -394,7 +388,7 @@ func isSpace(r rune) bool {

// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
return r == '\r' || r == '\n' || r == eof
}

// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
@@ -402,6 +396,74 @@ func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

func equals(b []byte, s Pos, e Pos, val []byte) bool {
n := 0
for i := s; i < e; i++ {
if n >= len(val) {
return true
}
switch {
case b[i] >= 'A' && b[i] <= 'Z' && ('a'+(b[i]-'A')) != val[n]:
return false
case b[i] != val[n]:
return false
}
n++
}
return true
}

func contains(b []byte, s Pos, e Pos, val []byte) bool {
for i := s; i < e; i++ {
for n := 0; n < len(val); n++ {
if b[i] == val[n] {
return true
}
}
}
return false
}

func lowercase(b []byte, s Pos, e Pos) {
for i := s; i < e; i++ {
if b[i] >= 'A' && b[i] <= 'Z' {
b[i] = ('a' + (b[i] - 'A'))
}
}
}
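The equals() and lowercase() helpers above avoid strings.EqualFold and strings.ToLower so keyword checks never allocate or consult Unicode tables; GraphQL keywords are plain ASCII. A stand-alone variant of the same idea (a hypothetical helper, assuming the keyword argument is already lowercase):

// equalsFold reports whether b matches lower, ASCII case-insensitively.
// lower must be all lowercase, e.g. []byte("query").
func equalsFold(b, lower []byte) bool {
	if len(b) != len(lower) {
		return false
	}
	for i := 0; i < len(b); i++ {
		c := b[i]
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A' // fold ASCII uppercase in place
		}
		if c != lower[i] {
			return false
		}
	}
	return true
}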
func (i *item) String() string {
var v string

switch i.typ {
case itemEOF:
v = "EOF"
case itemError:
v = "error"
case itemName:
v = "name"
case itemQuery:
v = "query"
case itemMutation:
v = "mutation"
case itemSub:
v = "subscription"
case itemPunctuator:
v = "punctuator"
case itemDirective:
v = "directive"
case itemVariable:
v = "variable"
case itemIntVal:
v = "int"
case itemFloatVal:
v = "float"
case itemStringVal:
v = "string"
}
return fmt.Sprintf("%s", v)
}

/*

Copyright (c) 2009 The Go Authors. All rights reserved.
qcode/parse.go (359 changed lines)
@@ -3,6 +3,8 @@ package qcode
import (
"errors"
"fmt"
"sync"
"unsafe"

"github.com/dosco/super-graph/util"
)
@@ -11,9 +13,12 @@ var (
errEOT = errors.New("end of tokens")
)

type parserType int16
type parserType int32

const (
maxFields = 100
maxArgs = 10

parserError parserType = iota
parserEOF
opQuery
@@ -28,51 +33,30 @@ const (
nodeVar
)

func (t parserType) String() string {
var v string

switch t {
case parserEOF:
v = "EOF"
case parserError:
v = "error"
case opQuery:
v = "query"
case opMutate:
v = "mutation"
case opSub:
v = "subscription"
case nodeStr:
v = "node-string"
case nodeInt:
v = "node-int"
case nodeFloat:
v = "node-float"
case nodeBool:
v = "node-bool"
case nodeObj:
v = "node-obj"
case nodeList:
v = "node-list"
}
return fmt.Sprintf("<%s>", v)
}

type Operation struct {
Type parserType
Name string
Args []*Arg
Fields []*Field
FieldLen int16
Args []Arg
argsA [10]Arg
Fields []Field
fieldsA [10]Field
}

var zeroOperation = Operation{}

func (o *Operation) Reset() {
*o = zeroOperation
}

type Field struct {
ID int16
ID int32
ParentID int32
Name string
Alias string
Args []*Arg
Parent *Field
Children []*Field
Args []Arg
argsA [5]Arg
Children []int32
childrenA [5]int32
}

type Arg struct {
@@ -86,58 +70,109 @@ type Node struct {
Val string
Parent *Node
Children []*Node
exp *Exp
}

var zeroNode = Node{}

func (n *Node) Reset() {
*n = zeroNode
}

type Parser struct {
input []byte // the string being scanned
pos int
items []item
depth int
err error
}

func Parse(gql string) (*Operation, error) {
var nodePool = sync.Pool{
New: func() interface{} { return new(Node) },
}

var opPool = sync.Pool{
New: func() interface{} { return new(Operation) },
}

var lexPool = sync.Pool{
New: func() interface{} { return new(lexer) },
}
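The three pools above all follow the same Get → Reset → use → Put lifecycle. Pooled values come back carrying whatever state the previous user left in them, so the explicit Reset before use is what makes reuse safe. The same discipline shown on a standard-library type, for illustration (assumes the usual bytes and sync imports):

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func greet(name string) string {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset() // clear anything left by the previous user
	b.WriteString("hello, ")
	b.WriteString(name)
	out := b.String() // copy out before handing the buffer back
	bufPool.Put(b)
	return out
}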
func Parse(gql []byte) (*Operation, error) {
return parseSelectionSet(gql)
}

func ParseArgValue(argVal string) (*Node, error) {
l := lexPool.Get().(*lexer)
l.Reset()

if err := lex(l, []byte(argVal)); err != nil {
return nil, err
}

p := &Parser{
input: l.input,
pos: -1,
items: l.items,
}
op, err := p.parseValue()
lexPool.Put(l)

return op, err
}

func parseSelectionSet(gql []byte) (*Operation, error) {
var err error

if len(gql) == 0 {
return nil, errors.New("blank query")
}

l, err := lex(gql)
if err != nil {
l := lexPool.Get().(*lexer)
l.Reset()

if err = lex(l, gql); err != nil {
return nil, err
}
p := &Parser{
pos: -1,
items: l.items,
}
return p.parseOp()
}

func ParseQuery(gql string) (*Operation, error) {
return parseByType(gql, opQuery)
}

func ParseArgValue(argVal string) (*Node, error) {
l, err := lex(argVal)
if err != nil {
return nil, err
}
p := &Parser{
input: l.input,
pos: -1,
items: l.items,
}

return p.parseValue()
var op *Operation

if p.peek(itemObjOpen) {
p.ignore()
}

func parseByType(gql string, ty parserType) (*Operation, error) {
l, err := lex(gql)
if p.peek(itemName) {
op = opPool.Get().(*Operation)
op.Reset()

op.Type = opQuery
op.Name = ""
op.Fields = op.fieldsA[:0]
op.Args = op.argsA[:0]
op.Fields, err = p.parseFields(op.Fields)

} else {
op, err = p.parseOp()

if err != nil {
return nil, err
}
p := &Parser{
pos: -1,
items: l.items,
}
return p.parseOpByType(ty)

lexPool.Put(l)

if err != nil {
return nil, err
}

return op, err
}

func (p *Parser) next() item {
@@ -184,17 +219,37 @@ func (p *Parser) peek(types ...itemType) bool {
return false
}

func (p *Parser) parseOpByType(ty parserType) (*Operation, error) {
op := &Operation{Type: ty}
func (p *Parser) parseOp() (*Operation, error) {
if !p.peek(itemQuery, itemMutation, itemSub) {
err := errors.New("expecting a query, mutation or subscription")
return nil, err
}
item := p.next()

op := opPool.Get().(*Operation)
op.Reset()

switch item.typ {
case itemQuery:
op.Type = opQuery
case itemMutation:
op.Type = opMutate
case itemSub:
op.Type = opSub
}

op.Fields = op.fieldsA[:0]
op.Args = op.argsA[:0]

var err error

if p.peek(itemName) {
op.Name = p.next().val
op.Name = p.val(p.next())
}

if p.peek(itemArgsOpen) {
p.ignore()
op.Args, err = p.parseArgs()
op.Args, err = p.parseArgs(op.Args)
if err != nil {
return nil, err
}
@@ -202,47 +257,23 @@ func (p *Parser) parseOpByType(ty parserType) (*Operation, error) {

if p.peek(itemObjOpen) {
p.ignore()
n := int16(0)
op.Fields, n, err = p.parseFields()
op.Fields, err = p.parseFields(op.Fields)
if err != nil {
return nil, err
}
op.FieldLen = n
}

if p.peek(itemObjClose) {
p.ignore()
}

return op, nil
}

func (p *Parser) parseOp() (*Operation, error) {
if p.peek(itemQuery, itemMutation, itemSub) == false {
err := fmt.Errorf("expecting a query, mutation or subscription (not '%s')", p.next().val)
return nil, err
}

item := p.next()

switch item.typ {
case itemQuery:
return p.parseOpByType(opQuery)
case itemMutation:
return p.parseOpByType(opMutate)
case itemSub:
return p.parseOpByType(opSub)
}

return nil, errors.New("unknown operation type")
}

func (p *Parser) parseFields() ([]*Field, int16, error) {
var roots []*Field
func (p *Parser) parseFields(fields []Field) ([]Field, error) {
st := util.NewStack()
i := int16(0)

for {
if len(fields) >= maxFields {
return nil, fmt.Errorf("too many fields (max %d)", maxFields)
}

if p.peek(itemObjClose) {
p.ignore()
st.Pop()
@@ -253,73 +284,74 @@ func (p *Parser) parseFields() ([]*Field, int16, error) {
continue
}

if i > 500 {
return nil, 0, errors.New("too many fields")
}

if p.peek(itemName) == false {
return nil, 0, errors.New("expecting an alias or field name")
return nil, errors.New("expecting an alias or field name")
}

field, err := p.parseField()
if err != nil {
return nil, 0, err
fields = append(fields, Field{ID: int32(len(fields))})

f := &fields[(len(fields) - 1)]
f.Args = f.argsA[:0]
f.Children = f.childrenA[:0]

if err := p.parseField(f); err != nil {
return nil, err
}
field.ID = i
i++

if st.Len() == 0 {
roots = append(roots, field)

} else {
if f.ID != 0 {
intf := st.Peek()
parent, ok := intf.(*Field)
if !ok || parent == nil {
return nil, 0, fmt.Errorf("unexpected value encountered %v", intf)
pid, ok := intf.(int32)

if !ok {
return nil, fmt.Errorf("14: unexpected value %v (%t)", intf, intf)
}
field.Parent = parent
parent.Children = append(parent.Children, field)

f.ParentID = pid
fields[pid].Children = append(fields[pid].Children, f.ID)
}

if p.peek(itemObjOpen) {
p.ignore()
st.Push(field)
st.Push(f.ID)
}
}

return roots, i, nil
return fields, nil
}
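parseFields now stores every Field in one flat slice and records edges as int32 IDs rather than *Field pointers, so a whole query tree lives in at most a couple of allocations and can be reset wholesale. The shape of that technique in isolation (hypothetical node type, not the project's API):

type node struct {
	Name     string
	Children []int32 // indices into the owning slice, not pointers
}

// addChild appends a node to the arena slice and links it to parent by index.
// The updated slice must be kept by the caller, since append may reallocate.
func addChild(nodes []node, parent int32, name string) ([]node, int32) {
	id := int32(len(nodes))
	nodes = append(nodes, node{Name: name})
	nodes[parent].Children = append(nodes[parent].Children, id)
	return nodes, id
}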
func (p *Parser) parseField() (*Field, error) {
func (p *Parser) parseField(f *Field) error {
var err error
field := &Field{Name: p.next().val}
f.Name = p.val(p.next())

if p.peek(itemColon) {
p.ignore()

if p.peek(itemName) {
field.Alias = field.Name
field.Name = p.next().val
f.Alias = f.Name
f.Name = p.val(p.next())
} else {
return nil, errors.New("expecting an aliased field name")
return errors.New("expecting an aliased field name")
}
}

if p.peek(itemArgsOpen) {
p.ignore()
if field.Args, err = p.parseArgs(); err != nil {
return nil, err
if f.Args, err = p.parseArgs(f.Args); err != nil {
return err
}
}

return field, nil
return nil
}

func (p *Parser) parseArgs() ([]*Arg, error) {
var args []*Arg
func (p *Parser) parseArgs(args []Arg) ([]Arg, error) {
var err error

for {
if len(args) >= maxArgs {
return nil, fmt.Errorf("too many args (max %d)", maxArgs)
}

if p.peek(itemArgsClose) {
p.ignore()
break
@@ -327,7 +359,8 @@ func (p *Parser) parseArgs() ([]*Arg, error) {
if p.peek(itemName) == false {
return nil, errors.New("expecting an argument name")
}
arg := &Arg{Name: p.next().val}
args = append(args, Arg{Name: p.val(p.next())})
arg := &args[(len(args) - 1)]

if p.peek(itemColon) == false {
return nil, errors.New("missing ':' after argument name")
@@ -338,16 +371,17 @@ func (p *Parser) parseArgs() ([]*Arg, error) {
if err != nil {
return nil, err
}
args = append(args, arg)
}
return args, nil
}

func (p *Parser) parseList() (*Node, error) {
parent := &Node{}
var nodes []*Node
var ty parserType
nodes := []*Node{}

parent := nodePool.Get().(*Node)
parent.Reset()

var ty parserType
for {
if p.peek(itemListClose) {
p.ignore()
@@ -378,8 +412,10 @@ func (p *Parser) parseList() (*Node, error) {
}

func (p *Parser) parseObj() (*Node, error) {
parent := &Node{}
var nodes []*Node
nodes := []*Node{}

parent := nodePool.Get().(*Node)
parent.Reset()

for {
if p.peek(itemObjClose) {
@@ -390,7 +426,7 @@ func (p *Parser) parseObj() (*Node, error) {
if p.peek(itemName) == false {
return nil, errors.New("expecting an argument name")
}
nodeName := p.next().val
nodeName := p.val(p.next())

if p.peek(itemColon) == false {
return nil, errors.New("missing ':' after Field argument name")
@@ -424,7 +460,8 @@ func (p *Parser) parseValue() (*Node, error) {
}

item := p.next()
node := &Node{}
node := nodePool.Get().(*Node)
node.Reset()

switch item.typ {
case itemIntVal:
@@ -440,9 +477,53 @@ func (p *Parser) parseValue() (*Node, error) {
case itemVariable:
node.Type = nodeVar
default:
return nil, fmt.Errorf("expecting a number, string, object, list or variable as an argument value (not %s)", p.next().val)
return nil, fmt.Errorf("expecting a number, string, object, list or variable as an argument value (not %s)", p.val(p.next()))
}
node.Val = item.val
node.Val = p.val(item)

return node, nil
}

func (p *Parser) val(v item) string {
return b2s(p.input[v.pos:v.end])
}

func b2s(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
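b2s above is the classic zero-copy []byte-to-string conversion: it saves an allocation and a copy per token, but the resulting string aliases the input buffer, so the bytes must not be mutated (or recycled) while the string is still reachable. A tiny program showing the aliasing:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	b := []byte("hello")
	s1 := string(b)                      // copies: safe, one allocation
	s2 := *(*string)(unsafe.Pointer(&b)) // aliases: zero-copy, fragile
	b[0] = 'H'
	fmt.Println(s1, s2) // "hello Hello": s2 observed the mutation
}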
func (t parserType) String() string {
var v string

switch t {
case parserEOF:
v = "EOF"
case parserError:
v = "error"
case opQuery:
v = "query"
case opMutate:
v = "mutation"
case opSub:
v = "subscription"
case nodeStr:
v = "node-string"
case nodeInt:
v = "node-int"
case nodeFloat:
v = "node-float"
case nodeBool:
v = "node-bool"
case nodeVar:
v = "node-var"
case nodeObj:
v = "node-obj"
case nodeList:
v = "node-list"
}
return fmt.Sprintf("<%s>", v)
}

func FreeNode(n *Node) {
nodePool.Put(n)
}

@@ -2,11 +2,10 @@ package qcode

import (
"errors"
"fmt"
"reflect"
"testing"
)

/*
func compareOp(op1, op2 Operation) error {
if op1.Type != op2.Type {
return errors.New("operator type mismatch")
@@ -44,10 +43,128 @@ func compareOp(op1, op2 Operation) error {

return nil
}
*/

func TestParse(t *testing.T) {
func TestCompile1(t *testing.T) {
qcompile, _ := NewCompiler(Config{})

_, err := qcompile.Compile([]byte(`
product(id: 15) {
id
name
}`))

if err != nil {
t.Fatal(err)
}
}

func BenchmarkParse(b *testing.B) {
func TestCompile2(t *testing.T) {
qcompile, _ := NewCompiler(Config{})

_, err := qcompile.Compile([]byte(`
query { product(id: 15) {
id
name
} }`))

if err != nil {
t.Fatal(err)
}
}

func TestCompile3(t *testing.T) {
qcompile, _ := NewCompiler(Config{})

_, err := qcompile.Compile([]byte(`
mutation {
product(id: 15, name: "Test") {
id
name
}
}`))

if err != nil {
t.Fatal(err)
}
}

func TestInvalidCompile1(t *testing.T) {
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile([]byte(`#`))

if err == nil {
t.Fatal(errors.New("expecting an error"))
}
}

func TestInvalidCompile2(t *testing.T) {
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile([]byte(`{u(where:{not:0})}`))

if err == nil {
t.Fatal(errors.New("expecting an error"))
}
}

func TestEmptyCompile(t *testing.T) {
qcompile, _ := NewCompiler(Config{})
_, err := qcompile.Compile([]byte(``))

if err == nil {
t.Fatal(errors.New("expecting an error"))
}
}

var gql = []byte(`
products(
# returns only 30 items
limit: 30,

# starts from item 10, commented out for now
# offset: 10,

# orders the response items by highest price
order_by: { price: desc },

# no duplicate prices returned
distinct: [ price ]

# only items with an id >= 20 and < 28 are returned
where: { id: { AND: { greater_or_equals: 20, lt: 28 } } }) {
id
name
price
}`)

func BenchmarkQCompile(b *testing.B) {
qcompile, _ := NewCompiler(Config{})

b.ResetTimer()
b.ReportAllocs()

for n := 0; n < b.N; n++ {
_, err := qcompile.Compile(gql)

if err != nil {
b.Fatal(err)
}
}
}

func BenchmarkQCompileP(b *testing.B) {
qcompile, _ := NewCompiler(Config{})

b.ResetTimer()
b.ReportAllocs()

b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := qcompile.Compile(gql)

if err != nil {
b.Fatal(err)
}
}
})
}
qcode/pprof_cpu.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -cpuprofile cpu.out -run=XXX
go tool pprof -cum cpu.out
qcode/pprof_mem.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh
go test -bench=. -benchmem -memprofile mem.out -run=XXX
go tool pprof -cum mem.out
qcode/qcode.go (641 changed lines)
@@ -1,19 +1,32 @@
package qcode

import (
"errors"
"fmt"
"strings"
"sync"

"github.com/dosco/super-graph/util"
"github.com/gobuffalo/flect"
)

type QCode struct {
Query *Query
}
type QType int
type Action int

type Query struct {
Select *Select
const (
maxSelectors = 30

QTQuery QType = iota + 1
QTMutation

ActionInsert Action = iota + 1
ActionUpdate
ActionDelete
)

type QCode struct {
Type QType
Selects []Select
}

type Column struct {
@@ -23,18 +36,19 @@ type Column struct {
}

type Select struct {
ID int16
ID int32
ParentID int32
Args map[string]*Node
AsList bool
Table string
Singular string
FieldName string
Cols []*Column
Cols []Column
Where *Exp
OrderBy []*OrderBy
DistinctOn []string
Paging Paging
Joins []*Select
Action Action
ActionVar string
Children []int32
}

type Exp struct {
@@ -46,6 +60,14 @@ type Exp struct {
ListType ValType
ListVal []string
Children []*Exp
childrenA [5]*Exp
doFree bool
}

var zeroExp = Exp{doFree: true}

func (ex *Exp) Reset() {
*ex = zeroExp
}

type OrderBy struct {
@@ -89,66 +111,6 @@ const (
OpTsQuery
)

func (t ExpOp) String() string {
var v string

switch t {
case OpNop:
v = "op-nop"
case OpAnd:
v = "op-and"
case OpOr:
v = "op-or"
case OpNot:
v = "op-not"
case OpEquals:
v = "op-equals"
case OpNotEquals:
v = "op-not-equals"
case OpGreaterOrEquals:
v = "op-greater-or-equals"
case OpLesserOrEquals:
v = "op-lesser-or-equals"
case OpGreaterThan:
v = "op-greater-than"
case OpLesserThan:
v = "op-lesser-than"
case OpIn:
v = "op-in"
case OpNotIn:
v = "op-not-in"
case OpLike:
v = "op-like"
case OpNotLike:
v = "op-not-like"
case OpILike:
v = "op-i-like"
case OpNotILike:
v = "op-not-i-like"
case OpSimilar:
v = "op-similar"
case OpNotSimilar:
v = "op-not-similar"
case OpContains:
v = "op-contains"
case OpContainedIn:
v = "op-contained-in"
case OpHasKey:
v = "op-has-key"
case OpHasKeyAny:
v = "op-has-key-any"
case OpHasKeyAll:
v = "op-has-key-all"
case OpIsNull:
v = "op-is-null"
case OpEqID:
v = "op-eq-id"
case OpTsQuery:
v = "op-ts-query"
}
return fmt.Sprintf("<%s>", v)
}

type ValType int

const (
@@ -183,236 +145,269 @@ const (
)

type Config struct {
Filter []string
DefaultFilter []string
FilterMap map[string][]string
Blacklist []string
Blocklist []string
KeepArgs bool
}

type Compiler struct {
fl *Exp
fm map[string]*Exp
bl map[string]struct{}
ka bool
}

func NewCompiler(conf Config) (*Compiler, error) {
bl := make(map[string]struct{}, len(conf.Blacklist))

for i := range conf.Blacklist {
bl[strings.ToLower(conf.Blacklist[i])] = struct{}{}
var opMap = map[parserType]QType{
opQuery: QTQuery,
opMutate: QTMutation,
}

fl, err := compileFilter(conf.Filter)
var expPool = sync.Pool{
New: func() interface{} { return &Exp{doFree: true} },
}

func NewCompiler(c Config) (*Compiler, error) {
bl := make(map[string]struct{}, len(c.Blocklist))

for i := range c.Blocklist {
bl[c.Blocklist[i]] = struct{}{}
}

fl, err := compileFilter(c.DefaultFilter)
if err != nil {
return nil, err
}

fm := make(map[string]*Exp, len(conf.FilterMap))
fm := make(map[string]*Exp, len(c.FilterMap))

for k, v := range conf.FilterMap {
for k, v := range c.FilterMap {
fil, err := compileFilter(v)
if err != nil {
return nil, err
}
fm[strings.ToLower(k)] = fil
singular := flect.Singularize(k)
plural := flect.Pluralize(k)

fm[singular] = fil
fm[plural] = fil
}

return &Compiler{fl, fm, bl}, nil
seedExp := [100]Exp{}
for i := range seedExp {
seedExp[i].doFree = true
expPool.Put(&seedExp[i])
}

func (com *Compiler) CompileQuery(query string) (*QCode, error) {
return &Compiler{fl, fm, bl, c.KeepArgs}, nil
}
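NewCompiler above warms expPool by allocating 100 Exp values in a single array and Put-ting each element, so early Get calls return ready objects instead of invoking New one allocation at a time. The pattern in isolation (hypothetical type; note that a sync.Pool is free to discard seeded items at any GC, so this is an optimization, not a guarantee):

type entry struct{ buf [64]byte }

var entryPool = sync.Pool{
	New: func() interface{} { return new(entry) },
}

func warm() {
	seeds := new([100]entry) // one allocation backs all 100 values
	for i := range seeds {
		entryPool.Put(&seeds[i])
	}
}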
func (com *Compiler) Compile(query []byte) (*QCode, error) {
var qc QCode
var err error

op, err := ParseQuery(query)
op, err := Parse(query)
if err != nil {
return nil, err
}

switch op.Type {
case opQuery:
qc.Query, err = com.compileQuery(op)
case opMutate:
case opSub:
default:
err = fmt.Errorf("Unknown operation type %d", op.Type)
}

qc.Selects, err = com.compileQuery(op)
if err != nil {
return nil, err
}

if t, ok := opMap[op.Type]; ok {
qc.Type = t
} else {
return nil, fmt.Errorf("Unknown operation type %d", op.Type)
}

opPool.Put(op)

return &qc, nil
}

func (com *Compiler) compileQuery(op *Operation) (*Query, error) {
var selRoot *Select
func (com *Compiler) compileQuery(op *Operation) ([]Select, error) {
id := int32(0)
parentID := int32(0)

st := util.NewStack()
id := int16(0)
fs := make([]*Select, op.FieldLen)
selects := make([]Select, 0, 5)
st := NewStack()

for i := range op.Fields {
st.Push(op.Fields[i])
if len(op.Fields) == 0 {
return nil, errors.New("empty query")
}
st.Push(op.Fields[0].ID)

for {
if st.Len() == 0 {
break
}

intf := st.Pop()
field, ok := intf.(*Field)

if !ok || field == nil {
return nil, fmt.Errorf("unexpected value poped out %v", intf)
if id >= maxSelectors {
return nil, fmt.Errorf("selector limit reached (%d)", maxSelectors)
}

fn := strings.ToLower(field.Name)
if _, ok := com.bl[fn]; ok {
fid := st.Pop()
field := &op.Fields[fid]

if _, ok := com.bl[field.Name]; ok {
continue
}
tn := flect.Pluralize(fn)

s := &Select{
selects = append(selects, Select{
ID: id,
Table: tn,
}
ParentID: parentID,
Table: field.Name,
Children: make([]int32, 0, 5),
})
s := &selects[(len(selects) - 1)]

if fn == tn {
s.Singular = flect.Singularize(fn)
} else {
s.Singular = fn
}

if fn == s.Table {
s.AsList = true
} else {
s.Paging.Limit = "1"
if s.ID != 0 {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}

if len(field.Alias) != 0 {
s.FieldName = field.Alias
} else if s.AsList {
s.FieldName = s.Table
} else {
s.FieldName = s.Singular
s.FieldName = s.Table
}

id++
fs[field.ID] = s

err := com.compileArgs(s, field.Args)
if err != nil {
return nil, err
}

for i := range field.Children {
f := field.Children[i]
fn := strings.ToLower(f.Name)
s.Cols = make([]Column, 0, len(field.Children))

if _, ok := com.bl[fn]; ok {
for _, cid := range field.Children {
f := op.Fields[cid]

if _, ok := com.bl[f.Name]; ok {
continue
}

if f.Children == nil {
col := &Column{Name: fn}
if len(f.Children) != 0 {
parentID = s.ID
st.Push(f.ID)
continue
}

col := Column{Name: f.Name}

if len(f.Alias) != 0 {
col.FieldName = f.Alias
} else {
col.FieldName = f.Name
}

s.Cols = append(s.Cols, col)
} else {
st.Push(f)
}
}

if field.Parent == nil {
selRoot = s
} else {
sp := fs[field.Parent.ID]
sp.Joins = append(sp.Joins, s)
}
id++
}

fil, ok := com.fm[selRoot.Table]
if !ok {
var ok bool
var fil *Exp

if id > 0 {
root := &selects[0]
fil, ok = com.fm[root.Table]

if !ok || fil == nil {
fil = com.fl
}

if fil != nil && fil.Op != OpNop {
if selRoot.Where != nil {
selRoot.Where = &Exp{Op: OpAnd, Children: []*Exp{fil, selRoot.Where}}
if root.Where != nil {
ow := root.Where

root.Where = expPool.Get().(*Exp)
root.Where.Reset()
root.Where.Op = OpAnd
root.Where.Children = root.Where.childrenA[:2]
root.Where.Children[0] = fil
root.Where.Children[1] = ow
} else {
selRoot.Where = fil
root.Where = fil
}
}

return &Query{selRoot}, nil
} else {
return nil, errors.New("invalid query")
}

func (com *Compiler) compileArgs(sel *Select, args []*Arg) error {
return selects[:id], nil
}

func (com *Compiler) compileArgs(sel *Select, args []Arg) error {
var err error

if com.ka {
sel.Args = make(map[string]*Node, len(args))
}

for i := range args {
if args[i] == nil {
return fmt.Errorf("[Args] unexpected nil argument found")
}
an := strings.ToLower(args[i].Name)
if _, ok := sel.Args[an]; ok {
continue
}
arg := &args[i]

switch an {
switch arg.Name {
case "id":
if sel.ID == int16(0) {
err = com.compileArgID(sel, args[i])
if sel.ID == 0 {
err = com.compileArgID(sel, arg)
}
case "search":
err = com.compileArgSearch(sel, args[i])
err = com.compileArgSearch(sel, arg)
case "where":
err = com.compileArgWhere(sel, args[i])
err = com.compileArgWhere(sel, arg)
case "orderby", "order_by", "order":
err = com.compileArgOrderBy(sel, args[i])
err = com.compileArgOrderBy(sel, arg)
case "distinct_on", "distinct":
err = com.compileArgDistinctOn(sel, args[i])
err = com.compileArgDistinctOn(sel, arg)
case "limit":
err = com.compileArgLimit(sel, args[i])
err = com.compileArgLimit(sel, arg)
case "offset":
err = com.compileArgOffset(sel, args[i])
err = com.compileArgOffset(sel, arg)
case "insert":
sel.Action = ActionInsert
err = com.compileArgAction(sel, arg)
case "update":
sel.Action = ActionUpdate
err = com.compileArgAction(sel, arg)
case "delete":
sel.Action = ActionDelete
err = com.compileArgAction(sel, arg)
}

if err != nil {
return err
}

sel.Args[an] = args[i].Val
if sel.Args != nil {
sel.Args[arg.Name] = arg.Val
} else {
nodePool.Put(arg.Val)
}
}

return nil
}

type expT struct {
parent *Exp
node *Node
}

func (com *Compiler) compileArgObj(arg *Arg) (*Exp, error) {
func (com *Compiler) compileArgObj(st *util.Stack, arg *Arg) (*Exp, error) {
if arg.Val.Type != nodeObj {
return nil, fmt.Errorf("expecting an object")
}

return com.compileArgNode(arg.Val)
return com.compileArgNode(st, arg.Val, true)
}

func (com *Compiler) compileArgNode(val *Node) (*Exp, error) {
st := util.NewStack()
func (com *Compiler) compileArgNode(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
var root *Exp

st.Push(&expT{nil, val.Children[0]})
if node == nil || len(node.Children) == 0 {
return nil, errors.New("invalid argument value")
}

pushChild(st, nil, node)

for {
if st.Len() == 0 {
@@ -420,19 +415,23 @@ func (com *Compiler) compileArgNode(val *Node) (*Exp, error) {
}

intf := st.Pop()
eT, ok := intf.(*expT)
if !ok || eT == nil {
return nil, fmt.Errorf("unexpected value poped out %v", intf)
node, ok := intf.(*Node)
if !ok || node == nil {
return nil, fmt.Errorf("16: unexpected value %v (%t)", intf, intf)
}

if len(eT.node.Name) != 0 {
if _, ok := com.bl[strings.ToLower(eT.node.Name)]; ok {
// Objects inside a list
if len(node.Name) == 0 {
pushChildren(st, node.exp, node)
continue

} else {
if _, ok := com.bl[node.Name]; ok {
continue
}
}

ex, err := newExp(st, eT)

ex, err := newExp(st, node, usePool)
if err != nil {
return nil, err
}
@@ -441,11 +440,31 @@ func (com *Compiler) compileArgNode(val *Node) (*Exp, error) {
continue
}

if eT.parent == nil {
if node.exp == nil {
root = ex
} else {
eT.parent.Children = append(eT.parent.Children, ex)
node.exp.Children = append(node.exp.Children, ex)
}

}

if com.ka {
return root, nil
}

pushChild(st, nil, node)

for {
if st.Len() == 0 {
break
}
intf := st.Pop()
node, _ := intf.(*Node)

for i := range node.Children {
st.Push(node.Children[i])
}
nodePool.Put(node)
}

return root, nil
@@ -456,7 +475,11 @@ func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
return nil
}

ex := &Exp{Op: OpEqID, Val: arg.Val.Val}
ex := expPool.Get().(*Exp)
ex.Reset()

ex.Op = OpEqID
ex.Val = arg.Val.Val

switch arg.Val.Type {
case nodeStr:
@@ -465,8 +488,10 @@ func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
ex.Type = ValInt
case nodeFloat:
ex.Type = ValFloat
case nodeVar:
ex.Type = ValVar
default:
fmt.Errorf("expecting an string, int or float")
fmt.Errorf("expecting a string, int, float or variable")
}

sel.Where = ex
@@ -474,14 +499,22 @@ func (com *Compiler) compileArgID(sel *Select, arg *Arg) error {
}

func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
ex := &Exp{
Op: OpTsQuery,
Type: ValStr,
Val: arg.Val.Val,
}
ex := expPool.Get().(*Exp)
ex.Reset()

ex.Op = OpTsQuery
ex.Type = ValStr
ex.Val = arg.Val.Val

if sel.Where != nil {
sel.Where = &Exp{Op: OpAnd, Children: []*Exp{ex, sel.Where}}
ow := sel.Where

sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = ex
sel.Where.Children[1] = ow
} else {
sel.Where = ex
}
@@ -489,15 +522,23 @@ func (com *Compiler) compileArgSearch(sel *Select, arg *Arg) error {
}

func (com *Compiler) compileArgWhere(sel *Select, arg *Arg) error {
st := util.NewStack()
var err error

ex, err := com.compileArgObj(arg)
ex, err := com.compileArgObj(st, arg)
if err != nil {
return err
}

if sel.Where != nil {
sel.Where = &Exp{Op: OpAnd, Children: []*Exp{ex, sel.Where}}
ow := sel.Where

sel.Where = expPool.Get().(*Exp)
sel.Where.Reset()
sel.Where.Op = OpAnd
sel.Where.Children = sel.Where.childrenA[:2]
sel.Where.Children[0] = ex
sel.Where.Children[1] = ow
} else {
sel.Where = ex
}
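compileArgSearch and compileArgWhere both splice a new condition onto an existing filter by wrapping the two under a pooled AND node whose Children slice comes from its own embedded childrenA array. The same move reduced to its essentials (hypothetical expr type, not the project's API):

type expr struct {
	Op        string
	Children  []*expr
	childrenA [5]*expr // embedded backing array for small fan-out
}

// and wraps a and b under one AND node with zero slice allocations.
func and(a, b *expr) *expr {
	e := &expr{Op: "and"} // in the real code this would come from a pool
	e.Children = e.childrenA[:2]
	e.Children[0] = a
	e.Children[1] = b
	return e
}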
@@ -525,10 +566,13 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
node, ok := intf.(*Node)

if !ok || node == nil {
return fmt.Errorf("OrderBy: unexpected value poped out %v", intf)
return fmt.Errorf("17: unexpected value %v (%t)", intf, intf)
}

if _, ok := com.bl[strings.ToLower(node.Name)]; ok {
if _, ok := com.bl[node.Name]; ok {
if !com.ka {
nodePool.Put(node)
}
continue
}

@@ -536,13 +580,15 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
for i := range node.Children {
st.Push(node.Children[i])
}
if !com.ka {
nodePool.Put(node)
}
continue
}

ob := &OrderBy{}

val := strings.ToLower(node.Val)
switch val {
switch node.Val {
case "asc":
ob.Order = OrderAsc
case "desc":
@@ -561,6 +607,10 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {

setOrderByColName(ob, node)
sel.OrderBy = append(sel.OrderBy, ob)

if !com.ka {
nodePool.Put(node)
}
}
return nil
}
@@ -568,7 +618,7 @@ func (com *Compiler) compileArgOrderBy(sel *Select, arg *Arg) error {
func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) error {
node := arg.Val

if _, ok := com.bl[strings.ToLower(node.Name)]; ok {
if _, ok := com.bl[node.Name]; ok {
return nil
}

@@ -582,6 +632,9 @@ func (com *Compiler) compileArgDistinctOn(sel *Select, arg *Arg) error {

for i := range node.Children {
sel.DistinctOn = append(sel.DistinctOn, node.Children[i].Val)
if !com.ka {
nodePool.Put(node.Children[i])
}
}

return nil
@@ -607,42 +660,64 @@ func (com *Compiler) compileArgOffset(sel *Select, arg *Arg) error {
}

sel.Paging.Offset = node.Val
return nil
}

func (com *Compiler) compileArgAction(sel *Select, arg *Arg) error {
switch sel.Action {
case ActionDelete:
if arg.Val.Type != nodeBool {
return fmt.Errorf("value for argument '%s' must be a boolean", arg.Name)
}
if arg.Val.Val == "false" {
sel.Action = 0
}

default:
if arg.Val.Type != nodeVar {
return fmt.Errorf("value for argument '%s' must be a variable", arg.Name)
}
sel.ActionVar = arg.Val.Val
}

return nil
}

func compileMutate() (*Query, error) {
return nil, nil
}

func compileSub() (*Query, error) {
return nil, nil
}

func newExp(st *util.Stack, eT *expT) (*Exp, error) {
ex := &Exp{}
node := eT.node

if len(node.Name) == 0 {
pushChildren(st, eT.parent, node)
return nil, nil
}

name := strings.ToLower(node.Name)
func newExp(st *util.Stack, node *Node, usePool bool) (*Exp, error) {
name := node.Name
if name[0] == '_' {
name = name[1:]
}

var ex *Exp

if usePool {
ex = expPool.Get().(*Exp)
ex.Reset()
} else {
ex = &Exp{doFree: false}
}
ex.Children = ex.childrenA[:0]

switch name {
case "and":
if len(node.Children) == 0 {
return nil, errors.New("missing expression after 'AND' operator")
}
ex.Op = OpAnd
pushChildren(st, ex, node)
case "or":
if len(node.Children) == 0 {
return nil, errors.New("missing expression after 'OR' operator")
}
ex.Op = OpOr
pushChildren(st, ex, node)
case "not":
if len(node.Children) == 0 {
return nil, errors.New("missing expression after 'NOT' operator")
}
ex.Op = OpNot
st.Push(&expT{ex, node.Children[0]})
pushChild(st, ex, node)
case "eq", "equals":
ex.Op = OpEquals
ex.Val = node.Val
@@ -704,7 +779,7 @@ func newExp(st *util.Stack, eT *expT) (*Exp, error) {
ex.Op = OpIsNull
ex.Val = node.Val
default:
pushChildren(st, eT.parent, node)
pushChildren(st, node.exp, node)
return nil, nil // skip node
}

@@ -751,16 +826,17 @@ func setListVal(ex *Exp, node *Node) {

func setWhereColName(ex *Exp, node *Node) {
var list []string

for n := node.Parent; n != nil; n = n.Parent {
if n.Type != nodeObj {
continue
}
k := strings.ToLower(n.Name)
if len(n.Name) != 0 {
k := n.Name
if k == "and" || k == "or" || k == "not" ||
k == "_and" || k == "_or" || k == "_not" {
continue
}
if len(k) != 0 {
list = append([]string{k}, list...)
}
}
@@ -768,36 +844,44 @@ func setWhereColName(ex *Exp, node *Node) {
ex.Col = list[0]

} else if len(list) > 2 {
ex.Col = strings.Join(list, ".")
ex.Col = buildPath(list)
ex.NestedCol = true
}
}

func setOrderByColName(ob *OrderBy, node *Node) {
var list []string

for n := node; n != nil; n = n.Parent {
k := strings.ToLower(n.Name)
if len(k) != 0 {
list = append([]string{k}, list...)
if len(n.Name) != 0 {
list = append([]string{n.Name}, list...)
}
}
if len(list) != 0 {
ob.Col = strings.Join(list, ".")
ob.Col = buildPath(list)
}
}

func pushChildren(st *util.Stack, ex *Exp, node *Node) {
func pushChildren(st *util.Stack, exp *Exp, node *Node) {
for i := range node.Children {
st.Push(&expT{ex, node.Children[i]})
node.Children[i].exp = exp
st.Push(node.Children[i])
}
}

func pushChild(st *util.Stack, exp *Exp, node *Node) {
node.Children[0].exp = exp
st.Push(node.Children[0])

}

func compileFilter(filter []string) (*Exp, error) {
var fl *Exp
com := &Compiler{}
st := util.NewStack()

if len(filter) == 0 {
return &Exp{Op: OpNop}, nil
return &Exp{Op: OpNop, doFree: false}, nil
}

for i := range filter {
@@ -805,15 +889,106 @@ func compileFilter(filter []string) (*Exp, error) {
if err != nil {
return nil, err
}
f, err := com.compileArgNode(node)
f, err := com.compileArgNode(st, node, false)
if err != nil {
return nil, err
}

if fl == nil {
fl = f
} else {
fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}}
fl = &Exp{Op: OpAnd, Children: []*Exp{fl, f}, doFree: false}
}
}
return fl, nil
}

func buildPath(a []string) string {
switch len(a) {
case 0:
return ""
case 1:
return a[0]
}

n := len(a) - 1
for i := 0; i < len(a); i++ {
n += len(a[i])
}

var b strings.Builder
b.Grow(n)
b.WriteString(a[0])
for _, s := range a[1:] {
b.WriteRune('.')
b.WriteString(s)
}
return b.String()
}
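buildPath replaces strings.Join for the dotted column paths: it sizes the output exactly (segment lengths plus one byte per separator) and calls Grow once, so the join costs a single allocation. A usage fragment, assuming buildPath is in scope:

fmt.Println(buildPath([]string{"users", "address", "city"}))
// Output: users.address.city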
func (t ExpOp) String() string {
var v string

switch t {
case OpNop:
v = "op-nop"
case OpAnd:
v = "op-and"
case OpOr:
v = "op-or"
case OpNot:
v = "op-not"
case OpEquals:
v = "op-equals"
case OpNotEquals:
v = "op-not-equals"
case OpGreaterOrEquals:
v = "op-greater-or-equals"
case OpLesserOrEquals:
v = "op-lesser-or-equals"
case OpGreaterThan:
v = "op-greater-than"
case OpLesserThan:
v = "op-lesser-than"
case OpIn:
v = "op-in"
case OpNotIn:
v = "op-not-in"
case OpLike:
v = "op-like"
case OpNotLike:
v = "op-not-like"
case OpILike:
v = "op-i-like"
case OpNotILike:
v = "op-not-i-like"
case OpSimilar:
v = "op-similar"
case OpNotSimilar:
v = "op-not-similar"
case OpContains:
v = "op-contains"
case OpContainedIn:
v = "op-contained-in"
case OpHasKey:
v = "op-has-key"
case OpHasKeyAny:
v = "op-has-key-any"
case OpHasKeyAll:
v = "op-has-key-all"
case OpIsNull:
v = "op-is-null"
case OpEqID:
v = "op-eq-id"
case OpTsQuery:
v = "op-ts-query"
}
return fmt.Sprintf("<%s>", v)
}

func FreeExp(ex *Exp) {
// fmt.Println(">", ex.doFree)
if ex.doFree {
expPool.Put(ex)
}
}