Compare commits

..

8 Commits

31 changed files with 343 additions and 184 deletions

1
.gitignore vendored
View File

@ -38,4 +38,5 @@ release
.gofuzz
*-fuzz.zip
*.test
.firebase

View File

@ -91,6 +91,7 @@ This compiler is what sits at the heart of Super Graph, with layers of useful fu
- Database migrations tool
- Database seeding tool
- Works with Postgres and YugabyteDB
- OpenCensus Support: Zipkin, Prometheus, X-Ray, Stackdriver
## Documentation

View File

@ -77,6 +77,8 @@ cors_debug: true
# exporter: "zipkin"
# endpoint: "http://zipkin:9411/api/v2/spans"
# sample: 0.2
# include_query: false
# include_params: false
auth:
# Can be 'rails' or 'jwt'

View File

@ -19,7 +19,7 @@ func BenchmarkGraphQL(b *testing.B) {
defer db.Close()
// mock.ExpectQuery(`^SELECT jsonb_build_object`).WithArgs()
c := &Config{DefaultBlock: true}
c := &Config{}
sg, err := newSuperGraph(c, db, psql.GetTestDBInfo())
if err != nil {
b.Fatal(err)

View File

@ -41,6 +41,10 @@ func (c *scontext) argMap() func(w io.Writer, tag string) (int, error) {
}
v := fields[0].Value
if isJsonScalarArray(v) {
return w.Write(jsonListToValues(v))
}
// Open and close quotes
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
fields[0].Value = v[1 : len(v)-1]
@ -118,13 +122,17 @@ func (c *scontext) argList(args [][]byte) ([]interface{}, error) {
if v, ok := fields[string(av)]; ok {
switch v[0] {
case '[', '{':
vars[i] = v
if isJsonScalarArray(v) {
vars[i] = jsonListToValues(v)
} else {
vars[i] = v
}
default:
var val interface{}
if err := json.Unmarshal(v, &val); err != nil {
return nil, err
}
vars[i] = val
}
@ -163,6 +171,38 @@ func escSQuote(b []byte) []byte {
return buf.Bytes()
}
func isJsonScalarArray(b []byte) bool {
if b[0] != '[' || b[len(b)-1] != ']' {
return false
}
for i := range b {
switch b[i] {
case '{':
return false
case '[', ' ', '\t', '\n':
continue
default:
return true
}
}
return true
}
func jsonListToValues(b []byte) []byte {
s := 0
for i := 1; i < len(b)-1; i++ {
if b[i] == '"' && s%2 == 0 {
b[i] = '\''
}
if b[i] == '\\' {
s++
} else {
s = 0
}
}
return b[1 : len(b)-1]
}
func argErr(name string) error {
return fmt.Errorf("query requires variable '%s' to be set", name)
}

View File

@ -30,11 +30,12 @@ type Config struct {
// or other database functions
SetUserID bool `mapstructure:"set_user_id"`
// DefaultBlock ensures only tables configured under the `anon` role
// config can be queries if the `anon` role. For example if the table
// `users` is not listed under the anon role then it will be filtered
// out of any unauthenticated queries that mention it.
DefaultBlock bool `mapstructure:"default_block"`
// DefaultAllow reverses the blocked-by-default behaviour for queries in
// anonymous mode. (anon role)
// For example if the table `users` is not listed under the anon role then
// access to it would be blocked by default for unauthenticated queries;
// this option reverses that behavior (!!! Use with caution !!!)
DefaultAllow bool `mapstructure:"default_allow"`
// Vars is a map of hardcoded variables that can be leveraged in your
// queries (eg variable admin_id will be $admin_id in the query)

View File

@ -93,8 +93,7 @@ func (sg *SuperGraph) initCompilers() error {
}
sg.qc, err = qcode.NewCompiler(qcode.Config{
DefaultBlock: sg.conf.DefaultBlock,
Blocklist: sg.conf.Blocklist,
Blocklist: sg.conf.Blocklist,
})
if err != nil {
return err
@ -276,9 +275,9 @@ func (c *scontext) resolveSQL() ([]byte, *stmt, error) {
// defaultRole := c.role
if useTx {
row = tx.QueryRow(finalSQL)
row = tx.QueryRowContext(c, finalSQL)
} else {
row = c.sg.db.QueryRow(finalSQL)
row = c.sg.db.QueryRowContext(c, finalSQL)
}
if len(stmts) > 1 {

View File

@ -70,8 +70,8 @@ func (sg *SuperGraph) initConfig() error {
sg.roles["user"] = &ur
}
// If anon role is not defined and DefaultBlock is not then then create it
if _, ok := sg.roles["anon"]; !ok && !c.DefaultBlock {
// If anon role is not defined then create it
if _, ok := sg.roles["anon"]; !ok {
ur := Role{
Name: "anon",
tm: make(map[string]*RoleTable),
@ -206,7 +206,7 @@ func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
func addRoles(c *Config, qc *qcode.Compiler) error {
for _, r := range c.Roles {
for _, t := range r.Tables {
if err := addRole(qc, r, t); err != nil {
if err := addRole(qc, r, t, c.DefaultAllow); err != nil {
return err
}
}
@ -215,9 +215,13 @@ func addRoles(c *Config, qc *qcode.Compiler) error {
return nil
}
func addRole(qc *qcode.Compiler, r Role, t RoleTable) error {
func addRole(qc *qcode.Compiler, r Role, t RoleTable, defaultAllow bool) error {
ro := true // read-only
if defaultAllow {
ro = false
}
if r.Name != "anon" {
ro = false
}

View File

@ -239,8 +239,6 @@ func (al *List) save(item Item) error {
qd := &schema.QueryDocument{}
if err := qd.Parse(item.Query); err != nil {
fmt.Println("##", item.Query)
return err
}
@ -248,8 +246,6 @@ func (al *List) save(item Item) error {
query := buf.String()
buf.Reset()
// fmt.Println(">", query)
item.Name = QueryName(query)
item.key = strings.ToLower(item.Name)

View File

@ -50,7 +50,7 @@ func DropSchema(t *testing.T, db *sql.DB) {
}
func TestSuperGraph(t *testing.T, db *sql.DB, before func(t *testing.T)) {
config := core.Config{DefaultBlock: true}
config := core.Config{}
config.UseAllowList = false
config.AllowListFile = "./allow.list"
config.RolesQuery = `SELECT * FROM users WHERE id = $user_id`

View File

@ -17,10 +17,6 @@ const (
closeBlock = 500
)
var (
ErrAllTablesSkipped = errors.New("all tables skipped. cannot render query")
)
type Variables map[string]json.RawMessage
type Config struct {
@ -92,30 +88,35 @@ func (co *Compiler) compileQuery(qc *qcode.QCode, w io.Writer, vars Variables) (
io.WriteString(c.w, `SELECT jsonb_build_object(`)
for _, id := range qc.Roots {
root := &qc.Selects[id]
if root.SkipRender || len(root.Cols) == 0 {
continue
}
st.Push(root.ID + closeBlock)
st.Push(root.ID)
if i != 0 {
io.WriteString(c.w, `, `)
}
c.renderRootSelect(root)
root := &qc.Selects[id]
if root.SkipRender || len(root.Cols) == 0 {
squoted(c.w, root.FieldName)
io.WriteString(c.w, `, `)
io.WriteString(c.w, `NULL`)
} else {
st.Push(root.ID + closeBlock)
st.Push(root.ID)
c.renderRootSelect(root)
}
i++
}
io.WriteString(c.w, `) as "__root" FROM `)
if i == 0 {
return 0, ErrAllTablesSkipped
}
var ignored uint32
if st.Len() != 0 {
io.WriteString(c.w, `) as "__root" FROM `)
} else {
io.WriteString(c.w, `) as "__root"`)
return ignored, nil
}
for {
if st.Len() == 0 {
break
@ -1026,9 +1027,9 @@ func (c *compilerContext) renderOp(ex *qcode.Exp, ti *DBTableInfo) error {
case qcode.OpLesserThan:
io.WriteString(c.w, `<`)
case qcode.OpIn:
io.WriteString(c.w, `IN`)
io.WriteString(c.w, `= ANY`)
case qcode.OpNotIn:
io.WriteString(c.w, `NOT IN`)
io.WriteString(c.w, `!= ANY`)
case qcode.OpLike:
io.WriteString(c.w, `LIKE`)
case qcode.OpNotLike:
@ -1174,6 +1175,16 @@ func (c *compilerContext) renderVal(ex *qcode.Exp, vars map[string]string, col *
io.WriteString(c.w, `)`)
case ok:
squoted(c.w, val)
case ex.Op == qcode.OpIn || ex.Op == qcode.OpNotIn:
io.WriteString(c.w, ` (string_to_array('{{`)
io.WriteString(c.w, ex.Val)
io.WriteString(c.w, `}}', ',')`)
io.WriteString(c.w, ` :: `)
io.WriteString(c.w, col.Type)
io.WriteString(c.w, `[])`)
return
default:
io.WriteString(c.w, ` '{{`)
io.WriteString(c.w, ex.Val)

View File

@ -7,8 +7,7 @@ import (
)
type Config struct {
Blocklist []string
DefaultBlock bool
Blocklist []string
}
type QueryConfig struct {

View File

@ -180,7 +180,7 @@ var expPool = sync.Pool{
}
func NewCompiler(c Config) (*Compiler, error) {
co := &Compiler{db: c.DefaultBlock}
co := &Compiler{}
co.tr = make(map[string]map[string]*trval)
co.bl = make(map[string]struct{}, len(c.Blocklist))
@ -333,59 +333,82 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
}
trv := com.getRole(role, field.Name)
skipRender := false
switch action {
case QTQuery:
if trv.query.block {
continue
if trv != nil {
switch action {
case QTQuery:
if trv.query.block {
skipRender = true
}
case QTInsert:
if trv.insert.block {
return fmt.Errorf("%s, insert blocked: %s", role, field.Name)
}
case QTUpdate:
if trv.update.block {
return fmt.Errorf("%s, update blocked: %s", role, field.Name)
}
case QTDelete:
if trv.delete.block {
return fmt.Errorf("%s, delete blocked: %s", role, field.Name)
}
}
case QTInsert:
if trv.insert.block {
return fmt.Errorf("insert blocked: %s", field.Name)
}
case QTUpdate:
if trv.update.block {
return fmt.Errorf("update blocked: %s", field.Name)
}
case QTDelete:
if trv.delete.block {
return fmt.Errorf("delete blocked: %s", field.Name)
}
} else if role == "anon" {
skipRender = true
}
selects = append(selects, Select{
ID: id,
ParentID: parentID,
Name: field.Name,
Children: make([]int32, 0, 5),
Allowed: trv.allowedColumns(action),
Functions: true,
ID: id,
ParentID: parentID,
Name: field.Name,
SkipRender: skipRender,
})
s := &selects[(len(selects) - 1)]
switch action {
case QTQuery:
s.Functions = !trv.query.disable.funcs
s.Paging.Limit = trv.query.limit
case QTInsert:
s.PresetMap = trv.insert.psmap
s.PresetList = trv.insert.pslist
case QTUpdate:
s.PresetMap = trv.update.psmap
s.PresetList = trv.update.pslist
}
if len(field.Alias) != 0 {
s.FieldName = field.Alias
} else {
s.FieldName = s.Name
}
if s.ParentID == -1 {
qc.Roots = append(qc.Roots, s.ID)
} else {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}
if skipRender {
id++
continue
}
s.Children = make([]int32, 0, 5)
s.Functions = true
if trv != nil {
s.Allowed = trv.allowedColumns(action)
switch action {
case QTQuery:
s.Functions = !trv.query.disable.funcs
s.Paging.Limit = trv.query.limit
case QTInsert:
s.PresetMap = trv.insert.psmap
s.PresetList = trv.insert.pslist
case QTUpdate:
s.PresetMap = trv.update.psmap
s.PresetList = trv.update.pslist
}
}
err := com.compileArgs(qc, s, field.Args, role)
if err != nil {
return err
@ -394,13 +417,6 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
// Order is important AddFilters must come after compileArgs
com.AddFilters(qc, s, role)
if s.ParentID == -1 {
qc.Roots = append(qc.Roots, s.ID)
} else {
p := &selects[s.ParentID]
p.Children = append(p.Children, s.ID)
}
s.Cols = make([]Column, 0, len(field.Children))
action = QTQuery
@ -440,14 +456,10 @@ func (com *Compiler) compileQuery(qc *QCode, op *Operation, role string) error {
func (com *Compiler) AddFilters(qc *QCode, sel *Select, role string) {
var fil *Exp
var nu bool // user required (or not) in this filter
var nu bool // need user_id (or not) in this filter
if trv, ok := com.tr[role][sel.Name]; ok {
fil, nu = trv.filter(qc.Type)
} else if com.db && role == "anon" {
// Tables not defined under the anon role will not be rendered
sel.SkipRender = true
}
if fil == nil {
@ -838,14 +850,17 @@ func (com *Compiler) compileArgAfterBefore(sel *Select, arg *Arg, pt PagingType)
return nil, false
}
var zeroTrv = &trval{}
// var zeroTrv = &trval{}
func (com *Compiler) getRole(role, field string) *trval {
if trv, ok := com.tr[role][field]; ok {
return trv
} else {
return zeroTrv
}
return nil
// } else {
// return zeroTrv
// }
}
func AddFilter(sel *Select, fil *Exp) {
@ -1015,10 +1030,15 @@ func setListVal(ex *Exp, node *Node) {
case NodeFloat:
ex.ListType = ValFloat
}
} else {
ex.Val = node.Val
return
}
for i := range node.Children {
ex.ListVal = append(ex.ListVal, node.Children[i].Val)
}
}
func setWhereColName(ex *Exp, node *Node) {

View File

@ -11,7 +11,6 @@ import (
"strings"
"github.com/dosco/super-graph/core/internal/allow"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/valyala/fasttemplate"
)
@ -103,9 +102,6 @@ func (sg *SuperGraph) prepareStmt(item allow.Item) error {
// logger.Debug().Msgf("Prepared statement 'query %s' (anon)", item.Name)
stmts2, err := sg.buildRoleStmt(qb, vars, "anon")
if err == psql.ErrAllTablesSkipped {
return nil
}
if err != nil {
return err
}
@ -121,9 +117,6 @@ func (sg *SuperGraph) prepareStmt(item allow.Item) error {
// logger.Debug().Msgf("Prepared statement 'mutation %s' (%s)", item.Name, role.Name)
stmts, err := sg.buildRoleStmt(qb, vars, role.Name)
if err == psql.ErrAllTablesSkipped {
continue
}
if err != nil {
return err
}
@ -141,7 +134,7 @@ func (sg *SuperGraph) prepareStmt(item allow.Item) error {
func (sg *SuperGraph) prepare(ct context.Context, st []stmt, key string) error {
finalSQL, am := processTemplate(st[0].sql)
sd, err := sg.db.Prepare(finalSQL)
sd, err := sg.db.PrepareContext(ct, finalSQL)
if err != nil {
return fmt.Errorf("prepare failed: %v: %s", err, finalSQL)
}

View File

@ -1,40 +0,0 @@
01a106d5.06939d67.js,1589776216137,2e1ce67f6cf79a8a8e2070fc4ea4a6104ac73a5b26a1ab10b62f6cd8e45a8074
1.1c32171f.js.LICENSE.txt,1589776216144,31b4d50dbbd144da150dcdcf0ccef8f6cf8b6b5204d5c9adde3b24466777fad5
0e384e19.7f29b403.js,1589776216137,e2c3882226f2a601b65e4bb1fdb771296c1946f9f125c90af4a8f451dfd2c867
19.fdfbe826.js.LICENSE.txt,1589776216145,6ad95a8099127a8d42b5ace6d148064b1d3e922174f08d75d0ee2220ebeacd0b
17896441.183211f5.js,1589776216137,7736db62d7498a8d3a10a617b1bdfac08c8f29dc03329f4ad3320f2571c223c0
20ac7829.c04b4a1e.js,1589776216137,5b95f479848ccd6959630d4a24bd551d0dbc74457911e9b6f3498655bfaf8ea7
1.1c32171f.js,1589776216137,5441b74bfad9f5a37ba0e6123621c73c3e3b9064bda6b9dcf62fdb7381bf8e41
2.8f12478f.js,1589776216137,3ac7ca0df8fca86145f5decbd86c8adfbc6b5b11a5be96fc96cc9bc33d6306e6
395f47e2.28d67f37.js,1589776216137,8a9b6bc2afdd99ca2b1827c8289352fab6163e30151b9701c29a7863b6cd00b6
404.html,1589776218438,0a748eaa7614b1982623360ba8554c0f498b0796ead3cc429a2c84d287084b50
3d9c95a4.c89589de.js,1589776216137,d5c45e5a3671f303683451d448e2e5d5b464f041cde683af6e824b9e7f951412
9225b3a9.a5e6036b.js,1589776216137,ec9a0d4b34d8751f74348d0da369625a18f320c9ed5ab3c5ccf047ead2551bd8
741df2ae.e13b96b2.js,1589776216137,12028f0cbdf783ac91ea42db64d91190ebd4df24cc74162f953aacc75d16d078
969d212d.9fc45877.js,1589776216138,8323c9f2db042bfaa2ebba43d9500bed881a694d0bfc27fd796cec95bb032dc5
c4f5d8e4.47e70b85.js,1589776216145,6f986b48720724e7c8a715812b5f6625c71c8eca258bb4b410a447eb5da52734
index.html,1589776218438,89f81ec3d3be439a827bd61448dcaddb71c33422df7baa88a7bbcdf784dbc0b2
98ce8162.b5ace15d.js,1589776216137,935e1c6dd08f7e9d0d00221559b95f0f649e28ddf64be6bbb7b3e65bae1aba72
main.e30d99cd.js.LICENSE.txt,1589776216144,1d906c3b83eacffe298d21eeb73e6e73e96310983224783d236195098e6765a7
runtime~main.366c29ad.js,1589776216145,0e550cc9522cd99c5fa4097c7db629eef56127a7f8ade0b7c9954cc8f6a01239
5043549d.62508ecf.js,1589776216137,383959b80d2b0c6416e83c9640ea03c666fe92c407e13a6f022b58072feeafd2
99e04881.197dcef6.js,1589776216144,af99883cbd4d58fbac7cbf814be33032b77bc8daf856aed54bdf0bf27ed5708d
sitemap.xml,1589776218455,660ed269bf0306ba47ecdfb638e487147784d614c43c6c4a8e84194973baf183
styles.9155f1d2.js,1589776216137,f1e0863928710e67338dc88c37f47ef3ff164d36c4bba40d005561094c9c3284
db32d859.a032827a.js,1589776216145,36d575ffad747898726a97cb7a3551e636f744218595bea5c060536eb8d8390f
docs/advanced/index.html,1589776218439,31171870786a597597de9417978a27253581c013962e39959ae4c0777bf86c28
docs/deploy/index.html,1589776218440,7a4735edb93006311b704e62b843bf89bc4354fdf0fdc22a0c5802e39878c193
docs/home/index.html,1589776218440,c7fbb0c1084c6ef8858775c5083b6b416b8188942d4402a5a625eadb3bc00942
docs/intro/index.html,1589776218440,c7a50ae98c0b279f422e55c2eeb9f7ba1c7c1a8bcac07be11fd6e05ced224094
img/super-graph-logo.svg,1589776218438,66a865c4936f44ea811464b967f221b615b7553e85dca0d6f1ef620da3911857
docs/react/index.html,1589776218440,f76fc976f3491d9aacf19ce3b34bee1339f87c673a9da95f192683615618f210
docs/why/index.html,1589776218440,4aa380fe4e5d8476645e368d1f708d5d1344331c572383db823c3499fa0c99cc
docs/security/index.html,1589776218440,0c7d466dc143935db8c02a448952cae2465635e4b6782b1682449bbd56807917
styles.8ee0cad4.css,1589776216137,34b2e79c5c5b1f7afda4376e422e8ccb2c3c04213ca09d788f0c68ecf153d6e6
docs/config/index.html,1589776218440,25b6e87a42c163ac966e80acebca8708f56ae95ba8f3ed8b98ff7fd70ca5a222
docs/internals/index.html,1589776218440,b6f2136a1c832f421a46329fb1f39269d820c55a0dfc9351848271a5501d8e6e
docs/start/index.html,1589776218440,485ec2c61117d8940d8028f34d51d421995a814d5b9d4d5a1870adaed48aec2c
docs/graphql/index.html,1589776218440,3bd79f703fe67656884f3121bfddc3a4fc4d9e5bb2bf9271c94014058fbbd806
main.e30d99cd.js,1589776216144,98a4087d6f537aaddbc1225aaabfb4d12d1394772deb618d4d457685cee59311
19.fdfbe826.js,1589776216144,b8abb73aea5fc0aa50d7e8b8bd38984e3b3aec62de2faf66fb3e55fd1428f8a7
server.bundle.js,1589776218438,826db37f1de931e8b088c1ff20b4a3c2fe0c3d54d9ff4020e500f0df1b83a616

View File

@ -10,7 +10,7 @@ You can then add your database schema to the migrations, maybe create some seed
```bash
# Download and install Super Graph. You will need Go 1.14 or above
go get https://github.com/dosco/super-graph
go get github.com/dosco/super-graph
```
And then create and launch your new app

View File

@ -0,0 +1,82 @@
---
id: telemetry
title: Tracing and Metrics
sidebar_label: Telemetry
---
import useBaseUrl from '@docusaurus/useBaseUrl'; // Add to the top of the file below the front matter.
Having observability and telemetry is at the core of any production ready service. Super Graph has built-in support for OpenCensus for tracing requests all the way from HTTP to the database and providing all kinds of metrics.
OpenCensus has a concept called exporters; these are external services that can consume this data to give you graphs, charts, alerting, etc. Super Graph again has built-in support for the Zipkin, Prometheus, Google Stackdriver and AWS X-Ray exporters.
## Telemetry config
The `telemetry` section of the standard config files is where you set values to configure this feature to your needs.
```yaml
telemetry:
debug: true
interval: 5s
metrics:
exporter: "prometheus"
endpoint: ""
namespace: "web api"
key: "1234xyz"
tracing:
exporter: "zipkin"
endpoint: "http://zipkin:9411/api/v2/spans"
sample: 0.2
include_query: false
include_params: false
```
**debug**: Enabling debug enables an embedded web ui to test and debug tracing and metrics. This UI called `zPages` is provided by OpenCensus and will be made available on the `/telemetry` path. For more information on using `zPages` https://opencensus.io/zpages/. Remember to disable this in production.
**interval**: This controls the interval setting for OpenCensus metrics collection. This defaults to `5 seconds` if not set.
**metric.exporters** Setting this enables metrics collection. The supported values for this field are `prometheus` and `stackdriver`. The Prometheus exporter requires `metric.namespace` to be set. The Stackdriver exporter requires the `metric.key` to be set to the Google Cloud Project ID.
**metric.endpoint** Is not currently used by any of the exporters.
**tracing.exporter** Setting this enables request tracing. The supported values for this field are `zipkin`, `aws` and `xray`. Zipkin requires `tracing.endpoint` to be set. AWS and Xray are the same and do not require any additional settings.
**tracing.sample** This controls what percentage of the requests should be traced. By default `0.5` or 50% of the requests are traced, `always` is also a valid value for this field and it means all requests will be traced.
**include_query** Include the Super Graph SQL query in the trace. Be careful with this setting in production; it will add the entire SQL query to the trace. This can be very useful to debug slow requests.
**include_params** Include the Super Graph SQL query parameters in the trace. Be careful with this setting in production as it can potentially leak sensitive user information into tracing logs.
## Using Zipkin
Zipkin is a really great open source request tracing project. It's easy to add to your current Super Graph app as a way to test tracing in development. Add the following to the Super Graph generated `docker-compose.yml` file. Also add `zipkin` to your current app's `depends_on` list. Once set up, the Zipkin UI is available at http://localhost:9411
```yaml
your_api:
...
depends_on:
- db
- zipkin
zipkin:
image: openzipkin/zipkin-slim
container_name: zipkin
# Environment settings are defined here https://github.com/openzipkin/zipkin/blob/master/zipkin-server/README.md#environment-variables
environment:
- STORAGE_TYPE=mem
# Uncomment to enable self-tracing
# - SELF_TRACING_ENABLED=true
# Uncomment to enable debug logging
# - JAVA_OPTS=-Dorg.slf4j.simpleLogger.log.zipkin2=debug
ports:
# Port used for the Zipkin UI and HTTP Api
- 9411:9411
```
### Zipkin HTTP to DB traces
<img alt="Zipkin Traces" src={useBaseUrl("img/zipkin1.png")} />
### Zipkin trace details
<img alt="Zipkin Traces" src={useBaseUrl('img/zipkin2.png')} />

View File

@ -9,6 +9,7 @@ module.exports = {
"react",
"advanced",
"security",
"telemetry",
"config",
"deploy",
"internals",

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 141 KiB

View File

@ -52,8 +52,9 @@ type Serv struct {
// Telemetry struct contains OpenCensus metrics and tracing related config
Telemetry struct {
Debug bool
Metrics struct {
Debug bool
Interval *time.Duration
Metrics struct {
Exporter string
Endpoint string
Namespace string
@ -61,9 +62,11 @@ type Serv struct {
}
Tracing struct {
Exporter string
Endpoint string
Sample string
Exporter string
Endpoint string
Sample string
IncludeQuery bool `mapstructure:"include_query"`
IncludeParams bool `mapstructure:"include_params"`
}
}

View File

@ -55,7 +55,7 @@ func cmdDBReset(cmd *cobra.Command, args []string) {
func cmdDBCreate(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf, false)
db, err := initDB(conf, false, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -74,7 +74,7 @@ func cmdDBCreate(cmd *cobra.Command, args []string) {
func cmdDBDrop(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf, false)
db, err := initDB(conf, false, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -132,7 +132,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
initConfOnce()
dest := args[0]
conn, err := initDB(conf, true)
conn, err := initDB(conf, true, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -224,7 +224,7 @@ func cmdDBMigrate(cmd *cobra.Command, args []string) {
func cmdDBStatus(cmd *cobra.Command, args []string) {
initConfOnce()
db, err := initDB(conf, true)
db, err := initDB(conf, true, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}

View File

@ -26,10 +26,9 @@ func cmdDBSeed(cmd *cobra.Command, args []string) {
if conf, err = initConf(); err != nil {
log.Fatalf("ERR failed to read config: %s", err)
}
conf.Production = false
db, err = initDB(conf, true)
db, err = initDB(conf, true, false)
if err != nil {
log.Fatalf("ERR failed to connect to database: %s", err)
}
@ -80,6 +79,8 @@ func graphQLFunc(sg *core.SuperGraph, query string, data interface{}, opt map[st
if v, ok := opt["user_id"]; ok && len(v) != 0 {
ct = context.WithValue(ct, core.UserIDKey, v)
} else {
ct = context.WithValue(ct, core.UserIDKey, "-1")
}
// var role string

View File

@ -19,7 +19,7 @@ func cmdServ(cmd *cobra.Command, args []string) {
initWatcher()
db, err = initDB(conf, true)
db, err = initDB(conf, true, true)
if err != nil {
fatalInProd(err, "failed to connect to database")
}

View File

@ -112,7 +112,7 @@ func GetConfigName() string {
}
func (c *Config) telemetryEnabled() bool {
return c.Telemetry.Metrics.Exporter != "" || c.Telemetry.Tracing.Exporter != ""
return c.Telemetry.Debug || c.Telemetry.Metrics.Exporter != "" || c.Telemetry.Tracing.Exporter != ""
}
func (c *Config) relPath(p string) string {

View File

@ -7,8 +7,8 @@ import (
var healthyResponse = []byte("All's Well")
func health(w http.ResponseWriter, _ *http.Request) {
ct, cancel := context.WithTimeout(context.Background(), conf.DB.PingTimeout)
func health(w http.ResponseWriter, r *http.Request) {
ct, cancel := context.WithTimeout(r.Context(), conf.DB.PingTimeout)
defer cancel()
if err := db.PingContext(ct); err != nil {

View File

@ -10,6 +10,8 @@ import (
"github.com/dosco/super-graph/core"
"github.com/dosco/super-graph/internal/serv/internal/auth"
"github.com/rs/cors"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/trace"
"go.uber.org/zap"
)
@ -44,7 +46,7 @@ func apiV1Handler() http.Handler {
AllowCredentials: true,
Debug: conf.DebugCORS,
})
h = c.Handler(h)
return c.Handler(h)
}
return h
@ -78,6 +80,22 @@ func apiV1(w http.ResponseWriter, r *http.Request) {
doLog := true
res, err := sg.GraphQL(ct, req.Query, req.Vars)
if conf.telemetryEnabled() {
span := trace.FromContext(ct)
span.AddAttributes(
trace.StringAttribute("operation", res.OperationName()),
trace.StringAttribute("query_name", res.QueryName()),
trace.StringAttribute("role", res.Role()),
)
if err != nil {
span.AddAttributes(trace.StringAttribute("error", err.Error()))
}
ochttp.SetRoute(ct, apiRoute)
}
if !conf.Production && res.QueryName() == introspectionQuery {
doLog = false
}

View File

@ -111,13 +111,10 @@ func initConf() (*Config, error) {
c.UseAllowList = true
}
// In anon role block all tables that are not defined in the role
c.DefaultBlock = true
return c, nil
}
func initDB(c *Config, useDB bool) (*sql.DB, error) {
func initDB(c *Config, useDB, useTelemetry bool) (*sql.DB, error) {
var db *sql.DB
var err error
@ -217,14 +214,35 @@ func initDB(c *Config, useDB bool) (*sql.DB, error) {
// return errors.New("failed to open db")
// }
if conf.telemetryEnabled() {
driverName, err = ocsql.Register(driverName, ocsql.WithAllTraceOptions(), ocsql.WithInstanceName(conf.AppName))
if useTelemetry && conf.telemetryEnabled() {
opts := ocsql.TraceOptions{
AllowRoot: true,
Ping: true,
RowsNext: true,
RowsClose: true,
RowsAffected: true,
LastInsertID: true,
Query: conf.Telemetry.Tracing.IncludeQuery,
QueryParams: conf.Telemetry.Tracing.IncludeParams,
}
opt := ocsql.WithOptions(opts)
name := ocsql.WithInstanceName(conf.AppName)
driverName, err = ocsql.Register(driverName, opt, name)
if err != nil {
return nil, fmt.Errorf("unable to register ocsql driver: %v", err)
}
ocsql.RegisterAllViews()
//defer ocsql.RecordStats(db, 2*time.Second)()
var interval time.Duration
if conf.Telemetry.Interval != nil {
interval = *conf.Telemetry.Interval
} else {
interval = 5 * time.Second
}
defer ocsql.RecordStats(db, interval)()
log.Println("INF OpenCensus telemetry enabled")
}
@ -242,9 +260,5 @@ func initDB(c *Config, useDB bool) (*sql.DB, error) {
return nil, fmt.Errorf("unable to open db connection: %v", err)
}
if conf.telemetryEnabled() {
defer ocsql.RecordStats(db, 2*time.Second)()
}
return db, nil
}

File diff suppressed because one or more lines are too long

View File

@ -13,6 +13,11 @@ import (
rice "github.com/GeertJohan/go.rice"
"github.com/NYTimes/gziphandler"
"github.com/dosco/super-graph/internal/serv/internal/auth"
"go.opencensus.io/plugin/ochttp"
)
var (
apiRoute string = "/api/v1/graphql"
)
func initWatcher() {
@ -76,6 +81,10 @@ func startHTTP() {
MaxHeaderBytes: 1 << 20,
}
if conf.telemetryEnabled() {
srv.Handler = &ochttp.Handler{Handler: routes}
}
idleConnsClosed := make(chan struct{})
go func() {
sigint := make(chan os.Signal, 1)
@ -114,8 +123,6 @@ func routeHandler() (http.Handler, error) {
return mux, nil
}
apiRoute := "/api/v1/graphql"
if len(conf.APIPath) != 0 {
apiRoute = path.Join("/", conf.APIPath, "/v1/graphql")
}
@ -178,6 +185,10 @@ func setActionRoutes(routes map[string]http.Handler) error {
routes[p] = fn
}
if conf.telemetryEnabled() {
routes[p] = ochttp.WithRouteTag(routes[p], p)
}
if err != nil {
return err
}

View File

@ -57,7 +57,7 @@ func enableObservability(mux *http.ServeMux) (func(), error) {
}
case "":
log.Println("INF No OpenCensus metrics exporter initialized")
log.Println("WRN OpenCensus: no metrics exporter defined")
default:
err = fmt.Errorf("invalid metrics exporter")
@ -96,14 +96,16 @@ func enableObservability(mux *http.ServeMux) (func(), error) {
tex = zipkin.NewExporter(re, lep)
case "":
log.Println("INF No OpenCensus tracing exporter initialized")
log.Println("WRN OpenCensus: no traceing exporter defined")
default:
err = fmt.Errorf("invalid tracing exporter")
}
if err != nil {
return nil, fmt.Errorf("ERR OpenCensus: %s: %v", conf.Telemetry.Tracing, err)
return nil, fmt.Errorf("ERR OpenCensus: %s: %v",
conf.Telemetry.Tracing.Exporter,
err)
}
if tex != nil {